From a11042365f93cf235ecc6f8b1a615cf3edd3e75a Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Fri, 24 Aug 2012 22:10:38 +0000 Subject: [PATCH 01/62] HDFS-3731. 2.0 release upgrade must handle blocks being written from 1.0. Contributed by Colin Patrick McCabe git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1377137 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hadoop/hdfs/server/common/Storage.java | 6 + .../hdfs/server/datanode/DataStorage.java | 35 ++++- .../hadoop/hdfs/TestDFSUpgradeFromImage.java | 123 +++++++++++++----- .../src/test/resources/hadoop1-bbw.tgz | Bin 0 -> 40234 bytes 5 files changed, 132 insertions(+), 35 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop1-bbw.tgz diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 7d341d8482..ba2a8b7564 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -673,6 +673,9 @@ Branch-2 ( Unreleased changes ) HDFS-3715. Fix TestFileCreation#testFileCreationNamenodeRestart. (Andrew Whang via eli) + HDFS-3731. 2.0 release upgrade must handle blocks being written from 1.0. + (Colin Patrick McCabe via eli) + BREAKDOWN OF HDFS-3042 SUBTASKS HDFS-2185. HDFS portion of ZK-based FailoverController (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java index 909d57d526..ca596a2b0e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java @@ -86,6 +86,12 @@ public abstract class Storage extends StorageInfo { public static final String STORAGE_TMP_LAST_CKPT = "lastcheckpoint.tmp"; public static final String STORAGE_PREVIOUS_CKPT = "previous.checkpoint"; + /** + * The blocksBeingWritten directory which was used in some 1.x and earlier + * releases. 
+ */ + public static final String STORAGE_1_BBW = "blocksBeingWritten"; + public enum StorageState { NON_EXISTENT, NOT_FORMATTED, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java index 221d6b2d73..b0675ef09c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java @@ -451,6 +451,8 @@ void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException { File curDir = sd.getCurrentDir(); File prevDir = sd.getPreviousDir(); + File bbwDir = new File(sd.getRoot(), Storage.STORAGE_1_BBW); + assert curDir.exists() : "Data node current directory must exist."; // Cleanup directory "detach" cleanupDetachDir(new File(curDir, STORAGE_DIR_DETACHED)); @@ -471,7 +473,7 @@ void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException { BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(nsInfo.getNamespaceID(), nsInfo.getBlockPoolID(), nsInfo.getCTime(), nsInfo.getClusterID()); bpStorage.format(curDir, nsInfo); - linkAllBlocks(tmpDir, new File(curBpDir, STORAGE_DIR_CURRENT)); + linkAllBlocks(tmpDir, bbwDir, new File(curBpDir, STORAGE_DIR_CURRENT)); // 4. Write version file under /current layoutVersion = HdfsConstants.LAYOUT_VERSION; @@ -578,15 +580,21 @@ void doFinalize(StorageDirectory sd) throws IOException { + "; cur CTime = " + this.getCTime()); assert sd.getCurrentDir().exists() : "Current directory must exist."; final File tmpDir = sd.getFinalizedTmp();//finalized.tmp directory + final File bbwDir = new File(sd.getRoot(), Storage.STORAGE_1_BBW); // 1. rename previous to finalized.tmp rename(prevDir, tmpDir); // 2. delete finalized.tmp dir in a separate thread + // Also delete the blocksBeingWritten from HDFS 1.x and earlier, if + // it exists. new Daemon(new Runnable() { @Override public void run() { try { deleteDir(tmpDir); + if (bbwDir.exists()) { + deleteDir(bbwDir); + } } catch(IOException ex) { LOG.error("Finalize upgrade for " + dataDirPath + " failed.", ex); } @@ -620,11 +628,16 @@ void finalizeUpgrade(String bpID) throws IOException { /** * Hardlink all finalized and RBW blocks in fromDir to toDir - * @param fromDir directory where the snapshot is stored - * @param toDir the current data directory - * @throws IOException if error occurs during hardlink + * + * @param fromDir The directory where the 'from' snapshot is stored + * @param fromBbwDir In HDFS 1.x, the directory where blocks + * that are under construction are stored. 
+ * @param toDir The current data directory + * + * @throws IOException If error occurs during hardlink */ - private void linkAllBlocks(File fromDir, File toDir) throws IOException { + private void linkAllBlocks(File fromDir, File fromBbwDir, File toDir) + throws IOException { HardLink hardLink = new HardLink(); // do the link int diskLayoutVersion = this.getLayoutVersion(); @@ -632,13 +645,23 @@ private void linkAllBlocks(File fromDir, File toDir) throws IOException { // hardlink finalized blocks in tmpDir/finalized linkBlocks(new File(fromDir, STORAGE_DIR_FINALIZED), new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink); - // hardlink rbw blocks in tmpDir/finalized + // hardlink rbw blocks in tmpDir/rbw linkBlocks(new File(fromDir, STORAGE_DIR_RBW), new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink); } else { // pre-RBW version // hardlink finalized blocks in tmpDir linkBlocks(fromDir, new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink); + if (fromBbwDir.exists()) { + /* + * We need to put the 'blocksBeingWritten' from HDFS 1.x into the rbw + * directory. It's a little messy, because the blocksBeingWriten was + * NOT underneath the 'current' directory in those releases. See + * HDFS-3731 for details. + */ + linkBlocks(fromBbwDir, + new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink); + } } LOG.info( hardLink.linkStats.report() ); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java index 8db1741e82..8d71791fd9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java @@ -39,7 +39,9 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.util.StringUtils; @@ -49,8 +51,9 @@ * This tests data transfer protocol handling in the Datanode. It sends * various forms of wrong data and verifies that Datanode handles it well. * - * This test uses the following two file from src/test/.../dfs directory : - * 1) hadoop-version-dfs-dir.tgz : contains DFS directories. + * This test uses the following items from src/test/.../dfs directory : + * 1) hadoop-22-dfs-dir.tgz and other tarred pre-upgrade NN / DN + * directory images * 2) hadoop-dfs-dir.txt : checksums that are compared in this test. * Please read hadoop-dfs-dir.txt for more information. 
*/ @@ -62,14 +65,23 @@ public class TestDFSUpgradeFromImage { new File(MiniDFSCluster.getBaseDirectory()); private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt"; private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz"; - - public int numDataNodes = 4; - + private static final String HADOOP1_BBW_IMAGE = "hadoop1-bbw.tgz"; + private static class ReferenceFileInfo { String path; long checksum; } + private static final Configuration upgradeConf; + + static { + upgradeConf = new HdfsConfiguration(); + upgradeConf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // block scanning off + if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of Maven + System.setProperty("test.build.data", "build/test/data"); + } + } + LinkedList refList = new LinkedList(); Iterator refIter; @@ -137,11 +149,33 @@ private void verifyChecksum(String path, long checksum) throws IOException { } } - CRC32 overallChecksum = new CRC32(); + /** + * Try to open a file for reading several times. + * + * If we fail because lease recovery hasn't completed, retry the open. + */ + private static FSInputStream dfsOpenFileWithRetries(DistributedFileSystem dfs, + String pathName) throws IOException { + IOException exc = null; + for (int tries = 0; tries < 10; tries++) { + try { + return dfs.dfs.open(pathName); + } catch (IOException e) { + exc = e; + } + if (!exc.getMessage().contains("Cannot obtain " + + "block length for LocatedBlock")) { + throw exc; + } + try { + Thread.sleep(1000); + } catch (InterruptedException ignored) {} + } + throw exc; + } - private void verifyDir(DistributedFileSystem dfs, Path dir) - throws IOException { - + private void verifyDir(DistributedFileSystem dfs, Path dir, + CRC32 overallChecksum) throws IOException { FileStatus[] fileArr = dfs.listStatus(dir); TreeMap fileMap = new TreeMap(); @@ -157,11 +191,11 @@ private void verifyDir(DistributedFileSystem dfs, Path dir) overallChecksum.update(pathName.getBytes()); if ( isDir ) { - verifyDir(dfs, path); + verifyDir(dfs, path, overallChecksum); } else { // this is not a directory. Checksum the file data. CRC32 fileCRC = new CRC32(); - FSInputStream in = dfs.dfs.open(pathName); + FSInputStream in = dfsOpenFileWithRetries(dfs, pathName); byte[] buf = new byte[4096]; int nRead = 0; while ( (nRead = in.read(buf, 0, buf.length)) > 0 ) { @@ -175,7 +209,8 @@ private void verifyDir(DistributedFileSystem dfs, Path dir) private void verifyFileSystem(DistributedFileSystem dfs) throws IOException { - verifyDir(dfs, new Path("/")); + CRC32 overallChecksum = new CRC32(); + verifyDir(dfs, new Path("/"), overallChecksum); verifyChecksum("overallCRC", overallChecksum.getValue()); @@ -237,7 +272,8 @@ public void testFailOnPreUpgradeImage() throws IOException { @Test public void testUpgradeFromRel22Image() throws IOException { unpackStorage(HADOOP22_IMAGE); - upgradeAndVerify(); + upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf). + numDataNodes(4)); } /** @@ -259,7 +295,8 @@ public void testUpgradeFromCorruptRel22Image() throws IOException { // Upgrade should now fail try { - upgradeAndVerify(); + upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf). 
+ numDataNodes(4)); fail("Upgrade did not fail with bad MD5"); } catch (IOException ioe) { String msg = StringUtils.stringifyException(ioe); @@ -268,21 +305,34 @@ public void testUpgradeFromCorruptRel22Image() throws IOException { } } } - - private void upgradeAndVerify() throws IOException { + + static void recoverAllLeases(DFSClient dfs, + Path path) throws IOException { + String pathStr = path.toString(); + HdfsFileStatus status = dfs.getFileInfo(pathStr); + if (!status.isDir()) { + dfs.recoverLease(pathStr); + return; + } + byte prev[] = HdfsFileStatus.EMPTY_NAME; + DirectoryListing dirList; + do { + dirList = dfs.listPaths(pathStr, prev); + HdfsFileStatus files[] = dirList.getPartialListing(); + for (HdfsFileStatus f : files) { + recoverAllLeases(dfs, f.getFullPath(path)); + } + prev = dirList.getLastName(); + } while (dirList.hasMore()); + } + + private void upgradeAndVerify(MiniDFSCluster.Builder bld) + throws IOException { MiniDFSCluster cluster = null; try { - Configuration conf = new HdfsConfiguration(); - if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of Ant - System.setProperty("test.build.data", "build/test/data"); - } - conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // block scanning off - cluster = new MiniDFSCluster.Builder(conf) - .numDataNodes(numDataNodes) - .format(false) - .startupOption(StartupOption.UPGRADE) - .clusterId("testClusterId") - .build(); + bld.format(false).startupOption(StartupOption.UPGRADE) + .clusterId("testClusterId"); + cluster = bld.build(); cluster.waitActive(); DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem(); DFSClient dfsClient = dfs.dfs; @@ -293,12 +343,27 @@ private void upgradeAndVerify() throws IOException { Thread.sleep(1000); } catch (InterruptedException ignored) {} } - + recoverAllLeases(dfsClient, new Path("/")); verifyFileSystem(dfs); } finally { if (cluster != null) { cluster.shutdown(); } } } - + /** + * Test upgrade from a 1.x image with some blocksBeingWritten + */ + @Test + public void testUpgradeFromRel1BBWImage() throws IOException { + unpackStorage(HADOOP1_BBW_IMAGE); + Configuration conf = new Configuration(upgradeConf); + conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, + System.getProperty("test.build.data") + File.separator + + "dfs" + File.separator + + "data" + File.separator + + "data1"); + upgradeAndVerify(new MiniDFSCluster.Builder(conf). + numDataNodes(1).enableManagedDfsDirsRedundancy(false). 
+ manageDataDfsDirs(false)); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop1-bbw.tgz b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop1-bbw.tgz new file mode 100644 index 0000000000000000000000000000000000000000..2574f8b7d1307901b1cf95e2fbaa4b3acd88c90e GIT binary patch literal 40234 zcmeI42~<;QmdC5xwy<14MF9axRiQ$w2+F>u0ZR%H5D*X$(kiPgQI@dhRmr9TVnYeC ziwKAkB0|_9#V%_KB8!9&Swe&m!WKfd_r{v(?&Rtd3o=?@4N5* zfA@ap3 zd>%Z>p(-=|d)dIlo*Y^^t%}xkhs(odIvv~9Bd&DbK9lZ`U_P5r?tVNz4(Xfso9uRQ zmUhmFley-8#J8TjCx<@wWFeWXv4G=7<^=-j>-^as16d8)NgDxBPY%JXeL3z3G05vB z@n;sHh`Ca*AYf+j@^;DsWPmRf6(qK86r|E<<%(sg{0ICYAT6hSR zK~)A>R~(P}*3}QxVX*TeU}3nJsMIsnc`eqw;mjpwDOuFool zdw4LMf1`cMM-*OqC}fNuWh0S&jhy(8%6eGoo^V`Ro8oCkejbijw?Qq*E+Fe z>lHJ+M2mWk$}KRikb$TKTXbm9BczOi;TZ)DDcxj0h=KH*;=m%P^ia3VPlS& zFE=;tU9Ksm+IJn7V3j|swe6ngLgjVOpemgxxpyEkG1nlJKcG&eQ5H5c1SiZ=_h4%q zYK-I$BTwgR?D!&XugOn*O%zuXI0dIwP2IX3m0cbDGg%GId4&2LJe2H&hw^YxIS$G- z!}NIG{>$x!ho9gEpo>++p#hglw+!bu;yA~{8CPH36){C6!BpJ@W(;K7zj$IESiA2z z56q_-My1W&^Pm;(N}?IWu**_u{5fW@4VKrw@_d@9G!?!25sU^eXs5=Nx*M%;mI;g~VJjkpYW`sZuydl=i4?Oz-DNY$Yvt{#+-x!TL5Nz*d_Brpnszp^I2fSbHFd{PIPG+38 z8GgYz9%!JXWaXV(VvcSeV}=NFa>o!h^Fxn#UQTYwui7!375wT+6~~7AE1xC1I^5>si<5FpFRo!7PGV z^l@1PYY^5TtU*|Va5eICkucD2!Xd@@=dupd3#J!LFPL61Utki#B!o!_lknq`kUCoZ zTs9VF5zHc(MKFs#E{k9d!Wx7%2x}0oMt&|5HsE}nG(Y{2b=SxA2R&^;r3?Q4GbUjH zuU+%x-$^8xr6!A6krCDsxKj>a?fAm46XWdVsbB39`gKkI0x98{vX|hJ)5tkp_fsSV z%;xxi^vC~u$B2}3{u4^sY(j9*a7dSL_Qq&bvvn=8CqlI9p|B?#jiG;a=L4_0(8|}3 ziR{BXAeavAbT}j)hKOzWpP zNXye5qgGkaXN6Dk+0gHCP?pUp5_B0YPhE8E75DU;CEjyzWUyZPbe>VIDnE#x;vy54 zT$s?+03k`SkrVt`+z*4~Jpfpn64md;g)VttsTmja3IOMN=>6{ui?e@qAe?&O)B^{@ z2YB=$V!{slcn+(*l?JV!#GjYS(6{owCMn(ES2VRa_Xj}DAkxFYCmXyG=@Yt8*>Kn4 zgal4c>U1B|@kEj1?mL~h z*P)Wt*Gx-%_V2m8%$sFIEd3#F_*k(mxw_iSQcQWBmz$K?xIVI39OvtYx+rO)=6ma_ zN6l|!_M7bcI_k5Wl08-vau3H(Y$i%Sy1q?M##P^^5O~?m&^X#c?BPZ0F0X&Ju^62u zFkGEwTtztk;p5LA9>K4x@?xFK)*Z>pEerMQgLWvDGF&+7dHS*g@oy3-1A(=b_n{xM z(w*(h`>tI2EZyROk}pBlQ>p#)yAu7{4TQFH+arAICj~NIz75g(OEmXW$uCW0wqLn_ z%vs(nUMML>ue-2QOW1GfQ%1%SEQ?KITHNtg+M~RP6pZ%)F^!qKohH)>ZJ7$t8repH zznrv-=DPJm5dzDoLdS?CQ_lmS9Gdrjhfevrg`qNiqqUyWmdK^}-Y82_lVUhYFc`D6 zl+%KtRFYbRuO$a!Y59#?-zpKegobPd!6a{27^;q$(ThvcW^b@9a{PnGfQu@bMNhI( z`Q{J1j5RGX@sNk00@ArDNzxJ*HZ#HJwKyA`F~Uw$aBX_@6jL5#B*3BE_gk2_xy zFVcB_@$rg>Ox%c-X#4kaLJ`q4E0bbulny{y-DOXjt-qr~>_j@>B`Yg>t)-nCMqm^}n@kL!V+PhJ$$sKFMLG|3z{lg(V%s7#X z&pp_tydzelXGZ7v?`-9d7#C`!nNjRYUHvjG;`!0d?m2+7w%^oEe)&jNY~=dUUF5mrTBG$~QP6!9&sgn-D3Si^eaJ1Ur`T7O z<`BcY0&FF1R_*$d8IFbQE2!X*6FlhDVB92%+y2g0vD5a1Ai zLjbOd;Hn6&ieM7LB>aVvu+Yuwzxm^ykG)3lGj&~i=4b}A&)S+qSoGEl)5zXcB~hD0 z4ZQXRRH&}h{=)8Ftd97t>zX^$pSW1tBp82p_#ryPFFs}G-?gITNzE&HvRJ;1n)p@i6)ISbVh(V9{WBIPAhvNByx1wDP zv_AIAVwOHFeA6-&;4LUg#Be*~*(%ZRh9rpVZ)#h>J7y(FD5W-9C{z-k+<5o;FoHK{ zk22gep*I(MPwi`B#4wC7n^ves*0?tcyLe~56-{yzexLr+-9LZZxP)W9nOF$>!Y6TJ zQULTi%Xp4l*A#CHk~-@wW!B7v;@ZO@8<0KJ%Z1K*vY|ufA}+LB;P=`Zs;KVK3KQ9! 
zHNHHkO8y+$c>(A6Qq3xI4A6^XB8e-p9uJsEj41U9$RSb+h~xivJD2|1zN+zpsz;~_ zFYC}_-q)!UYsj{qg?dNPdB(kS!B;k?F;$d+lXJ+Bc@LMAs`9iWwntAmy;M$~UF=c2 z9lLEkB`}~p<#Sb)%qT?rNvlkBqKFb<3)0H>o->wtVlLHf`^D{;d6>WK5&#*EYa-+o-8*e2zdy8STRx-!KqOfPR9GZo5WdtR+5YAdBtjYQ zr89(UywsWZF=q~vn;^g6HM6F>&M&MC`~UvwcaOLJm?pnFeOP2xV!`9x?vlQ~o-%k9 zH>HD_yHISW;(b$dN0&a;u~Ca6s8p1&j&uI*sI#mC`Hotz`0sc?+npoG*4DE!nSJ!@ z`xDJIZ+9{62Y6kDeZbhILOaTXb{4-b8!pgJ7M_B)#^BmNJ4P%Z2|z6PQT?8 zK;0bV-AFu?Rv9&aih6Prxg-ETCC*=E&(2DtSY4J}TDZ4wVmJNNK{{4@J#-JAAY)jifI(2lW<9wCjzD>{~|Bqkp*@+IP z18u2~b;Ngd{&A$Q31XsMc}yj_^(1PrWG3-VutNd6#0g1jCQF*jxUD|#TjdiQG`60P z8DtHudS8t#&6~FPK;|Bd9x*3N>*w@kt1_vax6!z1a2#Ln>KKC zpzV=e52S)75J=Kk&MP;GDc!MtJS+WyvUzN|T{x9qd%bB*5Y+%&s4cIf^m-qQq)E#g zq@7e~(k)ly;R*Migr)XtN;%4#hpx=OxHp$5Qq+9`-m$p#D5`<*x=f|NJkrMDachpN zVaq;w$5o+W0XSUH+_+wGg6Og&n3h@|>`N7_^lx+shcDOG>x?~oS`he}6p}Ho#0NOz zXI=7Np#*LbFX$F};lY;}TSUPaC&$TP!jbr_@4+J9iK9QpJDc9Tt~Mk%w$)+c_jsxT zzZKvph8Abm@@B2%sh_4k4APWs(#;iH(^9y&I;GvTk%C9$>mXOuz*VW`UT6UkT30XW zh9*!flc)}ZnEl`Vc(XsmE@?6GU-3DA%m2rDv9rL{&K$#*(B%#_nfFtVjjjRi*H-9| z%)y%v60g4Rz12<(j@ixJaaYx<2`N}WPH{4~Nw-(I)}@1Z;5OQgef1J6>wFqe^KDMM z9vZoT7)F<`y>$^CIvYes#P{q4qco4P{crqTzV%lFAjyUCW<>E_yNHCXvk|C6S@&e&=8fEgKnKqrN;mLGs>2$Se5ig%m$T!?lB8m#wd`=ON7~T zX`93nl@<|&$P)nkwo+_>PuR?>)ChZ&dkw>rEy73Zy3cx|fhY~Ux4@MmGzrD&2g~3g z_lV!t%rl9aKU zTHWd!LEk<{5=h(&zJaT-qdIz6U}f` z{uoqdt&JRUrH;Sjlkmb;Ch}Bw1vGTA{2m*a+ptE&V@gsgp!sOF zmhaa|$^eg>=dqMuZ>gR1zJzZfw&I Date: Fri, 24 Aug 2012 23:36:55 +0000 Subject: [PATCH 02/62] MAPREDUCE-4408. allow jobs to set a JAR that is in the distributed cached (rkanter via tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1377149 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 + .../apache/hadoop/mapreduce/JobSubmitter.java | 14 ++++- .../org/apache/hadoop/mapred/YARNRunner.java | 4 +- .../mapreduce/v2/MiniMRYarnCluster.java | 6 +- .../hadoop/mapreduce/v2/TestMRJobs.java | 62 ++++++++++++++----- 5 files changed, 68 insertions(+), 21 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 5587eb6d3a..37c9591995 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -144,6 +144,9 @@ Branch-2 ( Unreleased changes ) MAPREDUCE-4511. Add IFile readahead (ahmed via tucu) + MAPREDUCE-4408. allow jobs to set a JAR that is in the distributed cached + (rkanter via tucu) + BUG FIXES MAPREDUCE-4422. 
YARN_APPLICATION_CLASSPATH needs a documented default value in diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java index 08a09c2a69..31081b332f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java @@ -232,9 +232,17 @@ private void copyAndConfigureFiles(Job job, Path submitJobDir, if ("".equals(job.getJobName())){ job.setJobName(new Path(jobJar).getName()); } - copyJar(new Path(jobJar), JobSubmissionFiles.getJobJar(submitJobDir), - replication); - job.setJar(JobSubmissionFiles.getJobJar(submitJobDir).toString()); + Path jobJarPath = new Path(jobJar); + URI jobJarURI = jobJarPath.toUri(); + // If the job jar is already in fs, we don't need to copy it from local fs + if (jobJarURI.getScheme() == null || jobJarURI.getAuthority() == null + || !(jobJarURI.getScheme().equals(jtFs.getUri().getScheme()) + && jobJarURI.getAuthority().equals( + jtFs.getUri().getAuthority()))) { + copyJar(jobJarPath, JobSubmissionFiles.getJobJar(submitJobDir), + replication); + job.setJar(JobSubmissionFiles.getJobJar(submitJobDir).toString()); + } } else { LOG.warn("No job jar file set. User classes may not be found. "+ "See Job or Job#setJar(String)."); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java index 4555f86e88..74ae6446cd 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java @@ -345,10 +345,10 @@ public ApplicationSubmissionContext createApplicationSubmissionContext( createApplicationResource(defaultFileContext, jobConfPath, LocalResourceType.FILE)); if (jobConf.get(MRJobConfig.JAR) != null) { + Path jobJarPath = new Path(jobConf.get(MRJobConfig.JAR)); localResources.put(MRJobConfig.JOB_JAR, createApplicationResource(defaultFileContext, - new Path(jobSubmitDir, MRJobConfig.JOB_JAR), - LocalResourceType.ARCHIVE)); + jobJarPath, LocalResourceType.ARCHIVE)); } else { // Job jar may be null. For e.g, for pipes, the job jar is the hadoop // mapreduce jar itself which is already on the classpath. 
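
Taken together, the JobSubmitter and YARNRunner hunks above mean the client now copies the job jar into the staging directory only when the jar is not already on the job-tracker filesystem, and the AM localizes the jar from whatever path MRJobConfig.JAR holds. The following is a minimal, self-contained sketch (not part of the patch itself) of the scheme/authority comparison the new JobSubmitter code performs; the class name and the hdfs://nn.example.com URI are illustrative assumptions, not code from this change.

import java.net.URI;

public class JobJarLocationCheck {
  /**
   * Returns true if jarUri already lives on the filesystem identified by
   * fsUri, in which case no copy into the staging directory is needed.
   */
  static boolean onSameFileSystem(URI jarUri, URI fsUri) {
    // A jar with no scheme or authority is a plain local path and must be copied.
    if (jarUri.getScheme() == null || jarUri.getAuthority() == null) {
      return false;
    }
    return jarUri.getScheme().equals(fsUri.getScheme())
        && jarUri.getAuthority().equals(fsUri.getAuthority());
  }

  public static void main(String[] args) {
    URI fs = URI.create("hdfs://nn.example.com:8020/");
    // Already on HDFS: submit as-is, no copy.
    System.out.println(onSameFileSystem(
        URI.create("hdfs://nn.example.com:8020/user/x/job.jar"), fs)); // true
    // Local path: copy to the staging dir, as before the patch.
    System.out.println(onSameFileSystem(URI.create("/tmp/job.jar"), fs)); // false
  }
}
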
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java index de0ee249ad..8edf4f15d9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java @@ -72,8 +72,10 @@ public MiniMRYarnCluster(String testName, int noOfNMs) { @Override public void init(Configuration conf) { conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME); - conf.set(MRJobConfig.MR_AM_STAGING_DIR, new File(getTestWorkDir(), - "apps_staging_dir/").getAbsolutePath()); + if (conf.get(MRJobConfig.MR_AM_STAGING_DIR) == null) { + conf.set(MRJobConfig.MR_AM_STAGING_DIR, new File(getTestWorkDir(), + "apps_staging_dir/").getAbsolutePath()); + } conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "000"); try { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java index 03fdc4e57f..4a30c3cfa6 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java @@ -41,10 +41,10 @@ import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.Text; @@ -80,15 +80,24 @@ public class TestMRJobs { private static final Log LOG = LogFactory.getLog(TestMRJobs.class); protected static MiniMRYarnCluster mrCluster; + protected static MiniDFSCluster dfsCluster; private static Configuration conf = new Configuration(); private static FileSystem localFs; + private static FileSystem remoteFs; static { try { localFs = FileSystem.getLocal(conf); } catch (IOException io) { throw new RuntimeException("problem getting local fs", io); } + try { + dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2) + .format(true).racks(null).build(); + remoteFs = dfsCluster.getFileSystem(); + } catch (IOException io) { + throw new RuntimeException("problem starting mini dfs cluster", io); + } } private static Path TEST_ROOT_DIR = new Path("target", @@ -107,6 +116,8 @@ public static void setup() throws IOException { if (mrCluster == null) { mrCluster = new MiniMRYarnCluster(TestMRJobs.class.getName(), 3); Configuration conf = new Configuration(); + conf.set("fs.defaultFS", remoteFs.getUri().toString()); // use HDFS + conf.set(MRJobConfig.MR_AM_STAGING_DIR, "/apps_staging_dir"); mrCluster.init(conf); mrCluster.start(); } @@ -123,6 +134,10 @@ public static void tearDown() 
{ mrCluster.stop(); mrCluster = null; } + if (dfsCluster != null) { + dfsCluster.shutdown(); + dfsCluster = null; + } } @Test @@ -403,7 +418,6 @@ public void setup(Context context) throws IOException { Configuration conf = context.getConfiguration(); Path[] files = context.getLocalCacheFiles(); Path[] archives = context.getLocalCacheArchives(); - FileSystem fs = LocalFileSystem.get(conf); // Check that 4 (2 + appjar + DistrubutedCacheChecker jar) files // and 2 archives are present @@ -411,13 +425,13 @@ public void setup(Context context) throws IOException { Assert.assertEquals(2, archives.length); // Check lengths of the files - Assert.assertEquals(1, fs.getFileStatus(files[1]).getLen()); - Assert.assertTrue(fs.getFileStatus(files[2]).getLen() > 1); + Assert.assertEquals(1, localFs.getFileStatus(files[1]).getLen()); + Assert.assertTrue(localFs.getFileStatus(files[2]).getLen() > 1); // Check extraction of the archive - Assert.assertTrue(fs.exists(new Path(archives[0], + Assert.assertTrue(localFs.exists(new Path(archives[0], "distributed.jar.inside3"))); - Assert.assertTrue(fs.exists(new Path(archives[1], + Assert.assertTrue(localFs.exists(new Path(archives[1], "distributed.jar.inside4"))); // Check the class loaders @@ -448,8 +462,7 @@ public void setup(Context context) throws IOException { } } - @Test - public void testDistributedCache() throws Exception { + public void _testDistributedCache(String jobJarPath) throws Exception { if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) { LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test."); @@ -470,11 +483,13 @@ public void testDistributedCache() throws Exception { // Set the job jar to a new "dummy" jar so we can check that its extracted // properly - job.setJar(makeJobJarWithLib(TEST_ROOT_DIR.toUri().toString())); + job.setJar(jobJarPath); // Because the job jar is a "dummy" jar, we need to include the jar with // DistributedCacheChecker or it won't be able to find it - job.addFileToClassPath(new Path( - JarFinder.getJar(DistributedCacheChecker.class))); + Path distributedCacheCheckerJar = new Path( + JarFinder.getJar(DistributedCacheChecker.class)); + job.addFileToClassPath(distributedCacheCheckerJar.makeQualified( + localFs.getUri(), distributedCacheCheckerJar.getParent())); job.setMapperClass(DistributedCacheChecker.class); job.setOutputFormatClass(NullOutputFormat.class); @@ -484,7 +499,9 @@ public void testDistributedCache() throws Exception { job.addCacheFile( new URI(first.toUri().toString() + "#distributed.first.symlink")); job.addFileToClassPath(second); - job.addFileToClassPath(APP_JAR); // The AppMaster jar itself. 
+ // The AppMaster jar itself + job.addFileToClassPath( + APP_JAR.makeQualified(localFs.getUri(), APP_JAR.getParent())); job.addArchiveToClassPath(third); job.addCacheArchive(fourth.toUri()); job.setMaxMapAttempts(1); // speed up failures @@ -497,6 +514,23 @@ public void testDistributedCache() throws Exception { " but didn't Match Job ID " + jobId , trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/")); } + + @Test + public void testDistributedCache() throws Exception { + // Test with a local (file:///) Job Jar + Path localJobJarPath = makeJobJarWithLib(TEST_ROOT_DIR.toUri().toString()); + _testDistributedCache(localJobJarPath.toUri().toString()); + + // Test with a remote (hdfs://) Job Jar + Path remoteJobJarPath = new Path(remoteFs.getUri().toString() + "/", + localJobJarPath.getName()); + remoteFs.moveFromLocalFile(localJobJarPath, remoteJobJarPath); + File localJobJarFile = new File(localJobJarPath.toUri().toString()); + if (localJobJarFile.exists()) { // just to make sure + localJobJarFile.delete(); + } + _testDistributedCache(remoteJobJarPath.toUri().toString()); + } private Path createTempFile(String filename, String contents) throws IOException { @@ -522,7 +556,7 @@ private Path makeJar(Path p, int index) throws FileNotFoundException, return p; } - private String makeJobJarWithLib(String testDir) throws FileNotFoundException, + private Path makeJobJarWithLib(String testDir) throws FileNotFoundException, IOException{ Path jobJarPath = new Path(testDir, "thejob.jar"); FileOutputStream fos = @@ -535,7 +569,7 @@ private String makeJobJarWithLib(String testDir) throws FileNotFoundException, new Path(testDir, "lib2.jar").toUri().getPath())); jos.close(); localFs.setPermission(jobJarPath, new FsPermission("700")); - return jobJarPath.toUri().toString(); + return jobJarPath; } private void createAndAddJarToJar(JarOutputStream jos, File jarFile) From deead78e35b0cb81af875b5a8032cbd06c9a2dae Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Sat, 25 Aug 2012 01:03:22 +0000 Subject: [PATCH 03/62] HDFS-3844. Add @Override and remove {@inheritdoc} and unnecessary imports. Contributed by Jing Zhao. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1377168 13f79535-47bb-0310-9956-ffa450edef68 --- .../tools/RootDocProcessor.java | 1 + .../org/apache/hadoop/conf/Configuration.java | 2 + .../org/apache/hadoop/conf/Configured.java | 2 + .../hadoop/conf/ReconfigurationServlet.java | 11 ------ .../apache/hadoop/fs/AbstractFileSystem.java | 1 - .../org/apache/hadoop/fs/AvroFSInput.java | 5 +++ .../org/apache/hadoop/fs/BlockLocation.java | 1 + .../hadoop/fs/BufferedFSInputStream.java | 8 +++- .../apache/hadoop/fs/ChecksumFileSystem.java | 18 +++++++-- .../java/org/apache/hadoop/fs/ChecksumFs.java | 10 ++++- .../org/apache/hadoop/fs/ContentSummary.java | 6 +-- .../main/java/org/apache/hadoop/fs/DF.java | 1 + .../main/java/org/apache/hadoop/fs/DU.java | 4 ++ .../apache/hadoop/fs/FSDataInputStream.java | 7 ++++ .../org/apache/hadoop/fs/FSInputChecker.java | 7 ++++ .../org/apache/hadoop/fs/FSInputStream.java | 6 +++ .../org/apache/hadoop/fs/FSOutputSummer.java | 2 + .../org/apache/hadoop/fs/FileChecksum.java | 3 +- .../org/apache/hadoop/fs/FileContext.java | 26 +++++++++++++ .../java/org/apache/hadoop/fs/FileStatus.java | 5 +++ .../java/org/apache/hadoop/fs/FileSystem.java | 12 ++++-- .../java/org/apache/hadoop/fs/FileUtil.java | 2 + .../apache/hadoop/fs/FilterFileSystem.java | 38 +++++++++++++------ .../java/org/apache/hadoop/fs/FilterFs.java | 3 -- .../apache/hadoop/fs/FsServerDefaults.java | 3 ++ .../java/org/apache/hadoop/fs/FsShell.java | 1 + .../java/org/apache/hadoop/fs/FsStatus.java | 2 + .../org/apache/hadoop/fs/FsUrlConnection.java | 1 - .../hadoop/fs/FsUrlStreamHandlerFactory.java | 1 + .../java/org/apache/hadoop/fs/GlobFilter.java | 2 + .../org/apache/hadoop/fs/HarFileSystem.java | 26 +++++++++++++ .../org/apache/hadoop/fs/LocalFileSystem.java | 1 + .../apache/hadoop/fs/LocatedFileStatus.java | 3 ++ .../hadoop/fs/MD5MD5CRC32FileChecksum.java | 20 +++++----- .../java/org/apache/hadoop/fs/Options.java | 1 - .../main/java/org/apache/hadoop/fs/Path.java | 4 ++ .../apache/hadoop/fs/RawLocalFileSystem.java | 36 +++++++++++++++--- .../apache/hadoop/fs/TrashPolicyDefault.java | 1 + .../apache/hadoop/fs/ftp/FTPFileSystem.java | 1 + .../apache/hadoop/fs/ftp/FTPInputStream.java | 9 +++++ .../org/apache/hadoop/fs/kfs/KFSImpl.java | 17 +++++++++ .../apache/hadoop/fs/kfs/KFSInputStream.java | 10 +++++ .../apache/hadoop/fs/kfs/KFSOutputStream.java | 9 ++--- .../hadoop/fs/permission/FsPermission.java | 13 ++++--- .../fs/permission/PermissionStatus.java | 9 +++-- .../hadoop/fs/s3/Jets3tFileSystemStore.java | 14 +++++++ .../apache/hadoop/fs/s3/MigrationTool.java | 4 ++ .../org/apache/hadoop/fs/s3/S3FileSystem.java | 2 + .../s3native/Jets3tNativeFileSystemStore.java | 12 ++++++ .../apache/hadoop/fs/shell/CommandFormat.java | 3 ++ .../org/apache/hadoop/fs/shell/Delete.java | 2 + .../org/apache/hadoop/fs/shell/Display.java | 2 + .../org/apache/hadoop/fs/shell/FsCommand.java | 1 + .../org/apache/hadoop/fs/shell/PathData.java | 1 + .../hadoop/fs/viewfs/ChRootedFileSystem.java | 1 + .../fs/viewfs/NotInMountpointException.java | 4 -- .../hadoop/fs/viewfs/ViewFileSystem.java | 1 + .../hadoop/fs/viewfs/ViewFsFileStatus.java | 3 +- .../hadoop/ha/ActiveStandbyElector.java | 4 ++ .../apache/hadoop/ha/HAServiceProtocol.java | 1 + .../java/org/apache/hadoop/ha/NodeFencer.java | 1 + .../apache/hadoop/ha/SshFenceByTcpPort.java | 2 + .../org/apache/hadoop/http/HttpServer.java | 4 +- .../apache/hadoop/io/AbstractMapWritable.java | 6 ++- .../org/apache/hadoop/io/ArrayWritable.java | 2 + 
.../org/apache/hadoop/io/BooleanWritable.java | 2 + .../org/apache/hadoop/io/ByteWritable.java | 2 + .../org/apache/hadoop/io/BytesWritable.java | 4 ++ .../apache/hadoop/io/CompressedWritable.java | 2 + .../apache/hadoop/io/DataInputByteBuffer.java | 2 - .../apache/hadoop/io/DefaultStringifier.java | 3 ++ .../org/apache/hadoop/io/DoubleWritable.java | 2 + .../org/apache/hadoop/io/EnumSetWritable.java | 15 ++++---- .../org/apache/hadoop/io/FloatWritable.java | 2 + .../org/apache/hadoop/io/GenericWritable.java | 5 +++ .../java/org/apache/hadoop/io/IOUtils.java | 2 + .../org/apache/hadoop/io/IntWritable.java | 2 + .../org/apache/hadoop/io/LongWritable.java | 8 ++++ .../java/org/apache/hadoop/io/MD5Hash.java | 8 ++++ .../java/org/apache/hadoop/io/MapFile.java | 2 + .../org/apache/hadoop/io/MapWritable.java | 30 +++++++-------- .../org/apache/hadoop/io/NullWritable.java | 4 ++ .../org/apache/hadoop/io/ObjectWritable.java | 7 ++++ .../org/apache/hadoop/io/OutputBuffer.java | 1 + .../org/apache/hadoop/io/ReadaheadPool.java | 1 + .../org/apache/hadoop/io/SecureIOUtils.java | 1 - .../org/apache/hadoop/io/SequenceFile.java | 32 ++++++++++++++++ .../java/org/apache/hadoop/io/SetFile.java | 1 + .../apache/hadoop/io/SortedMapWritable.java | 38 +++++++++---------- .../org/apache/hadoop/io/Stringifier.java | 1 + .../main/java/org/apache/hadoop/io/Text.java | 8 ++++ .../apache/hadoop/io/TwoDArrayWritable.java | 2 + .../main/java/org/apache/hadoop/io/UTF8.java | 2 + .../org/apache/hadoop/io/VIntWritable.java | 2 + .../org/apache/hadoop/io/VLongWritable.java | 2 + .../hadoop/io/VersionMismatchException.java | 1 + .../apache/hadoop/io/VersionedWritable.java | 2 + .../apache/hadoop/io/WritableComparator.java | 2 + .../apache/hadoop/io/compress/BZip2Codec.java | 20 ++++++++++ .../io/compress/BlockCompressorStream.java | 3 ++ .../io/compress/BlockDecompressorStream.java | 3 ++ .../io/compress/CompressionCodecFactory.java | 1 + .../io/compress/CompressionInputStream.java | 5 +++ .../io/compress/CompressionOutputStream.java | 3 ++ .../hadoop/io/compress/CompressorStream.java | 5 +++ .../io/compress/DecompressorStream.java | 9 +++++ .../hadoop/io/compress/DefaultCodec.java | 11 ++++++ .../apache/hadoop/io/compress/GzipCodec.java | 19 ++++++++-- .../io/compress/bzip2/CBZip2InputStream.java | 3 ++ .../io/compress/bzip2/CBZip2OutputStream.java | 5 +++ .../io/compress/lz4/Lz4Decompressor.java | 1 + .../compress/snappy/SnappyDecompressor.java | 1 + .../zlib/BuiltInGzipDecompressor.java | 13 +++++-- .../io/compress/zlib/BuiltInZlibDeflater.java | 1 + .../io/compress/zlib/BuiltInZlibInflater.java | 1 + .../io/compress/zlib/ZlibCompressor.java | 10 +++++ .../io/compress/zlib/ZlibDecompressor.java | 10 +++++ .../apache/hadoop/io/file/tfile/BCFile.java | 4 ++ .../hadoop/io/file/tfile/CompareUtils.java | 1 + .../apache/hadoop/io/file/tfile/TFile.java | 3 ++ .../apache/hadoop/io/nativeio/NativeIO.java | 1 + .../hadoop/io/nativeio/NativeIOException.java | 1 + .../apache/hadoop/io/retry/RetryPolicies.java | 4 ++ .../io/serializer/DeserializerComparator.java | 1 + .../io/serializer/JavaSerialization.java | 12 ++++-- .../JavaSerializationComparator.java | 1 + .../io/serializer/WritableSerialization.java | 2 - .../io/serializer/avro/AvroSerialization.java | 2 + .../java/org/apache/hadoop/ipc/Client.java | 4 ++ .../apache/hadoop/ipc/ProtobufRpcEngine.java | 1 + .../org/apache/hadoop/ipc/ProtocolProxy.java | 1 - .../apache/hadoop/ipc/ProtocolSignature.java | 3 +- .../java/org/apache/hadoop/ipc/Server.java | 1 + 
.../apache/hadoop/ipc/WritableRpcEngine.java | 7 ++++ .../java/org/apache/hadoop/log/LogLevel.java | 1 + .../metrics/ganglia/GangliaContext.java | 1 - .../hadoop/metrics/spi/CompositeContext.java | 1 - .../spi/NullContextWithUpdateThread.java | 1 - .../hadoop/metrics/spi/OutputRecord.java | 4 -- .../org/apache/hadoop/metrics/spi/Util.java | 1 - .../hadoop/metrics/util/MetricsIntValue.java | 2 - .../metrics/util/MetricsTimeVaryingInt.java | 2 - .../metrics/util/MetricsTimeVaryingLong.java | 2 - .../metrics/util/MetricsTimeVaryingRate.java | 2 - .../hadoop/metrics2/impl/MetricsConfig.java | 1 - .../apache/hadoop/metrics2/sink/FileSink.java | 1 - .../hadoop/metrics2/source/JvmMetrics.java | 1 - .../net/AbstractDNSToSwitchMapping.java | 1 - .../main/java/org/apache/hadoop/net/DNS.java | 1 - .../apache/hadoop/net/NetworkTopology.java | 1 + .../apache/hadoop/net/ScriptBasedMapping.java | 1 - .../hadoop/net/SocketIOWithTimeout.java | 1 - .../apache/hadoop/net/SocketInputStream.java | 5 +++ .../apache/hadoop/net/SocketOutputStream.java | 6 +++ .../apache/hadoop/net/SocksSocketFactory.java | 11 +----- .../hadoop/net/StandardSocketFactory.java | 7 ---- .../org/apache/hadoop/net/TableMapping.java | 1 + .../hadoop/record/BinaryRecordInput.java | 17 +++++++++ .../hadoop/record/BinaryRecordOutput.java | 15 ++++++++ .../java/org/apache/hadoop/record/Buffer.java | 5 +++ .../apache/hadoop/record/CsvRecordInput.java | 16 ++++++++ .../apache/hadoop/record/CsvRecordOutput.java | 14 +++++++ .../java/org/apache/hadoop/record/Record.java | 4 ++ .../hadoop/record/RecordComparator.java | 1 + .../apache/hadoop/record/XmlRecordInput.java | 21 ++++++++++ .../apache/hadoop/record/XmlRecordOutput.java | 14 +++++++ .../hadoop/record/compiler/CGenerator.java | 1 + .../hadoop/record/compiler/CodeBuffer.java | 1 + .../apache/hadoop/record/compiler/Consts.java | 4 -- .../hadoop/record/compiler/CppGenerator.java | 1 + .../hadoop/record/compiler/JBoolean.java | 7 ++++ .../hadoop/record/compiler/JBuffer.java | 9 +++++ .../apache/hadoop/record/compiler/JByte.java | 5 +++ .../hadoop/record/compiler/JCompType.java | 5 +++ .../hadoop/record/compiler/JDouble.java | 6 +++ .../apache/hadoop/record/compiler/JFloat.java | 6 +++ .../apache/hadoop/record/compiler/JInt.java | 5 +++ .../apache/hadoop/record/compiler/JLong.java | 6 +++ .../apache/hadoop/record/compiler/JMap.java | 10 +++++ .../hadoop/record/compiler/JRecord.java | 9 +++++ .../hadoop/record/compiler/JString.java | 6 +++ .../hadoop/record/compiler/JVector.java | 10 +++++ .../hadoop/record/compiler/JavaGenerator.java | 1 + .../hadoop/record/compiler/ant/RccTask.java | 1 + .../compiler/generated/ParseException.java | 1 + .../hadoop/record/compiler/generated/Rcc.java | 1 - .../compiler/generated/RccTokenManager.java | 8 ---- .../record/compiler/generated/Token.java | 1 + .../compiler/generated/TokenMgrError.java | 1 + .../hadoop/record/meta/FieldTypeInfo.java | 2 + .../apache/hadoop/record/meta/MapTypeID.java | 5 ++- .../hadoop/record/meta/RecordTypeInfo.java | 3 ++ .../hadoop/record/meta/StructTypeID.java | 3 ++ .../org/apache/hadoop/record/meta/TypeID.java | 2 + .../hadoop/record/meta/VectorTypeID.java | 3 ++ .../security/RefreshUserMappingsProtocol.java | 1 - .../hadoop/security/SaslInputStream.java | 7 ++++ .../hadoop/security/SaslOutputStream.java | 7 +++- .../apache/hadoop/security/SaslRpcClient.java | 1 + .../apache/hadoop/security/SaslRpcServer.java | 2 - .../apache/hadoop/security/SecurityUtil.java | 2 + .../security/ShellBasedUnixGroupsMapping.java | 3 -- 
.../ShellBasedUnixGroupsNetgroupMapping.java | 6 --- .../hadoop/security/UserGroupInformation.java | 4 +- .../security/authorize/AccessControlList.java | 3 ++ .../security/authorize/PolicyProvider.java | 1 + .../RefreshAuthorizationPolicyProtocol.java | 1 - .../ssl/FileBasedKeyStoresFactory.java | 1 + .../security/ssl/SSLHostnameVerifier.java | 17 +++++++++ .../apache/hadoop/security/token/Token.java | 4 +- .../AbstractDelegationTokenIdentifier.java | 8 +++- .../AbstractDelegationTokenSecretManager.java | 1 + .../token/delegation/DelegationKey.java | 2 + .../hadoop/tools/GetUserMappingsProtocol.java | 1 - .../apache/hadoop/util/AsyncDiskService.java | 1 + .../org/apache/hadoop/util/DataChecksum.java | 8 ++++ .../java/org/apache/hadoop/util/HeapSort.java | 5 +-- .../java/org/apache/hadoop/util/Progress.java | 1 + .../org/apache/hadoop/util/PureJavaCrc32.java | 8 ++-- .../apache/hadoop/util/PureJavaCrc32C.java | 8 ++-- .../org/apache/hadoop/util/QuickSort.java | 5 +-- .../apache/hadoop/util/ReflectionUtils.java | 1 + .../java/org/apache/hadoop/util/Shell.java | 4 +- .../org/apache/hadoop/util/bloom/Filter.java | 2 + .../org/apache/hadoop/util/bloom/Key.java | 2 + .../apache/hadoop/util/hash/JenkinsHash.java | 1 + .../apache/hadoop/util/hash/MurmurHash.java | 1 + .../apache/hadoop/cli/util/CLICommand.java | 1 + .../apache/hadoop/cli/util/CLITestCmd.java | 6 +++ .../apache/hadoop/cli/util/FSCmdExecutor.java | 1 + .../apache/hadoop/conf/TestConfServlet.java | 1 - .../apache/hadoop/conf/TestConfiguration.java | 2 - .../conf/TestConfigurationDeprecation.java | 2 - .../hadoop/conf/TestDeprecatedKeys.java | 3 -- .../hadoop/conf/TestReconfiguration.java | 6 --- .../hadoop/fs/FSMainOperationsBaseTest.java | 2 + .../fs/FileContextMainOperationsBaseTest.java | 2 + .../hadoop/fs/FileContextPermissionBase.java | 1 + .../apache/hadoop/fs/FileContextURIBase.java | 2 - .../org/apache/hadoop/fs/TestAvroFSInput.java | 1 - .../java/org/apache/hadoop/fs/TestDU.java | 2 + .../TestFSMainOperationsLocalFileSystem.java | 3 ++ .../hadoop/fs/TestFcLocalFsPermission.java | 2 + .../apache/hadoop/fs/TestFcLocalFsUtil.java | 1 + .../hadoop/fs/TestFileSystemCaching.java | 10 +++++ .../org/apache/hadoop/fs/TestFsOptions.java | 2 - .../hadoop/fs/TestFsShellReturnCode.java | 1 + .../org/apache/hadoop/fs/TestListFiles.java | 1 - .../fs/TestLocalFSFileContextCreateMkdir.java | 1 + .../TestLocalFSFileContextMainOperations.java | 2 + .../fs/TestLocalFSFileContextSymlink.java | 5 +++ .../hadoop/fs/TestLocalFsFCStatistics.java | 3 ++ .../hadoop/fs/TestLocal_S3FileContextURI.java | 1 + .../hadoop/fs/TestS3_LocalFileContextURI.java | 1 + .../java/org/apache/hadoop/fs/TestTrash.java | 2 + .../hadoop/fs/kfs/KFSEmulationImpl.java | 17 +++++++++ .../hadoop/fs/kfs/TestKosmosFileSystem.java | 8 +--- .../fs/loadGenerator/DataGenerator.java | 1 + .../fs/loadGenerator/LoadGenerator.java | 2 + .../fs/loadGenerator/StructureGenerator.java | 1 + .../hadoop/fs/s3/InMemoryFileSystemStore.java | 14 +++++++ .../InMemoryNativeFileSystemStore.java | 12 ++++++ .../fs/viewfs/TestChRootedFileSystem.java | 1 + .../TestFSMainOperationsLocalFileSystem.java | 2 + .../fs/viewfs/TestFcCreateMkdirLocalFs.java | 2 + .../viewfs/TestFcMainOperationsLocalFs.java | 2 + .../fs/viewfs/TestFcPermissionsLocalFs.java | 2 + ...tViewFileSystemDelegationTokenSupport.java | 2 + .../TestViewFileSystemLocalFileSystem.java | 2 + ...ileSystemWithAuthorityLocalFileSystem.java | 3 ++ .../hadoop/fs/viewfs/TestViewFsLocalFs.java | 2 + .../hadoop/fs/viewfs/TestViewFsTrash.java | 2 
+- .../TestViewFsWithAuthorityLocalFs.java | 3 ++ .../fs/viewfs/TestViewfsFileStatus.java | 1 - .../apache/hadoop/ha/ClientBaseWithFixes.java | 3 +- .../org/apache/hadoop/ha/DummyHAService.java | 1 + .../apache/hadoop/http/TestGlobalFilter.java | 4 ++ .../apache/hadoop/http/TestPathFilter.java | 4 ++ .../apache/hadoop/http/TestServletFilter.java | 5 +++ .../org/apache/hadoop/io/AvroTestUtil.java | 2 - .../org/apache/hadoop/io/RandomDatum.java | 3 ++ .../apache/hadoop/io/TestEnumSetWritable.java | 8 ---- .../apache/hadoop/io/TestGenericWritable.java | 6 +++ .../org/apache/hadoop/io/TestMD5Hash.java | 2 + .../apache/hadoop/io/TestSecureIOUtils.java | 3 -- .../apache/hadoop/io/TestSequenceFile.java | 2 + .../java/org/apache/hadoop/io/TestText.java | 1 + .../hadoop/io/TestVersionedWritable.java | 8 ++++ .../org/apache/hadoop/io/TestWritable.java | 3 ++ .../apache/hadoop/io/TestWritableName.java | 3 ++ .../hadoop/io/compress/TestCodecFactory.java | 13 +++++++ .../hadoop/io/file/tfile/NanoTimer.java | 1 + .../io/file/tfile/TestTFileByteArrays.java | 1 - ...eNoneCodecsJClassComparatorByteArrays.java | 3 -- .../tfile/TestTFileSeqFileComparison.java | 16 ++++++++ .../hadoop/io/nativeio/TestNativeIO.java | 1 + .../hadoop/io/retry/TestFailoverProxy.java | 1 + .../io/retry/UnreliableImplementation.java | 7 +++- .../hadoop/io/serializer/avro/Record.java | 2 + .../avro/TestAvroSerialization.java | 4 ++ .../apache/hadoop/ipc/MiniRPCBenchmark.java | 2 + .../java/org/apache/hadoop/ipc/TestIPC.java | 3 ++ .../java/org/apache/hadoop/ipc/TestRPC.java | 14 +++++++ .../hadoop/ipc/TestRPCCompatibility.java | 1 + .../org/apache/hadoop/ipc/TestSaslRPC.java | 6 +++ .../hadoop/metrics/TestMetricsServlet.java | 1 + .../metrics2/lib/TestMetricsAnnotations.java | 4 +- .../metrics2/lib/TestMetricsRegistry.java | 3 ++ .../org/apache/hadoop/net/StaticMapping.java | 2 - .../org/apache/hadoop/record/FromCpp.java | 2 + .../org/apache/hadoop/record/RecordBench.java | 2 - .../apache/hadoop/record/TestRecordIO.java | 2 + .../hadoop/record/TestRecordVersioning.java | 2 + .../java/org/apache/hadoop/record/ToCpp.java | 2 + .../security/TestAuthenticationFilter.java | 2 +- .../hadoop/security/TestCredentials.java | 3 -- .../security/TestDoAsEffectiveUser.java | 10 ++++- .../hadoop/security/TestGroupsCaching.java | 3 ++ .../hadoop/security/TestJNIGroupsMapping.java | 5 --- .../security/TestUserGroupInformation.java | 6 +++ .../authorize/TestAccessControlList.java | 3 -- .../hadoop/security/token/TestToken.java | 2 - .../token/delegation/TestDelegationToken.java | 9 ++++- .../apache/hadoop/test/GenericTestUtils.java | 1 + .../apache/hadoop/test/MetricsAsserts.java | 1 - .../hadoop/test/MultithreadedTestUtil.java | 2 + .../org/apache/hadoop/util/JarFinder.java | 1 - .../apache/hadoop/util/TestIndexedSort.java | 7 +++- .../org/apache/hadoop/util/TestOptions.java | 1 - .../apache/hadoop/util/TestPureJavaCrc32.java | 2 +- .../hadoop/util/TestReflectionUtils.java | 3 +- .../org/apache/hadoop/util/TestRunJar.java | 2 + .../org/apache/hadoop/util/TestShell.java | 3 +- .../fs/http/client/HttpFSFileSystem.java | 1 - .../client/HttpFSKerberosAuthenticator.java | 3 -- .../HttpFSKerberosAuthenticationHandler.java | 3 -- .../http/server/HttpFSParametersProvider.java | 1 - .../hadoop/lib/lang/RunnableCallable.java | 1 + .../hadoop/FileSystemAccessService.java | 2 + .../InstrumentationService.java | 5 +++ .../service/scheduler/SchedulerService.java | 1 + .../hadoop/lib/servlet/ServerWebApp.java | 2 + .../apache/hadoop/lib/wsrs/BooleanParam.java | 1 
+ .../org/apache/hadoop/lib/wsrs/ByteParam.java | 1 + .../org/apache/hadoop/lib/wsrs/EnumParam.java | 1 + .../apache/hadoop/lib/wsrs/IntegerParam.java | 1 + .../org/apache/hadoop/lib/wsrs/LongParam.java | 1 + .../org/apache/hadoop/lib/wsrs/Param.java | 1 + .../apache/hadoop/lib/wsrs/ShortParam.java | 1 + .../apache/hadoop/lib/wsrs/StringParam.java | 2 + .../TestHttpFSFileSystemLocalFileSystem.java | 3 ++ .../TestHttpFSWithHttpFSFileSystem.java | 4 ++ ...stHttpFSKerberosAuthenticationHandler.java | 1 - .../apache/hadoop/test/TestHFSTestCase.java | 3 ++ .../org/apache/hadoop/test/TestHTestCase.java | 3 ++ .../apache/hadoop/test/TestHdfsHelper.java | 1 + hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../main/java/org/apache/hadoop/fs/Hdfs.java | 3 -- .../apache/hadoop/hdfs/HsftpFileSystem.java | 1 - .../hadoop/hdfs/RemoteBlockReader2.java | 4 -- .../token/block/BlockTokenIdentifier.java | 2 - .../server/blockmanagement/BlockManager.java | 1 - .../BlockPlacementPolicyWithNodeGroup.java | 9 ----- .../blockmanagement/DatanodeManager.java | 1 - .../blockmanagement/HeartbeatManager.java | 1 - .../PendingDataNodeMessages.java | 4 -- .../hadoop/hdfs/server/common/JspHelper.java | 3 -- .../hadoop/hdfs/server/common/Storage.java | 1 + .../datanode/SecureDataNodeStarter.java | 2 - .../server/namenode/EditLogInputStream.java | 3 -- .../hdfs/server/namenode/FSEditLogLoader.java | 2 - .../hdfs/server/namenode/FSEditLogOp.java | 1 - .../hdfs/server/namenode/FSImageFormat.java | 1 - .../server/namenode/FSImageSerialization.java | 1 - .../server/namenode/NameNodeHttpServer.java | 6 --- .../namenode/RedundantEditLogInputStream.java | 6 --- .../server/namenode/SerialNumberManager.java | 2 +- .../StatisticsEditsVisitor.java | 1 - .../ImageLoaderCurrent.java | 2 - .../hadoop/hdfs/util/CyclicIteration.java | 3 -- .../hadoop/fi/DataTransferTestUtil.java | 1 - .../apache/hadoop/hdfs/PipelinesTestUtil.java | 3 -- .../org/apache/hadoop/hdfs/TestDFSMkdirs.java | 3 -- .../namenode/TestGenericJournalConf.java | 1 - .../TestSecondaryNameNodeUpgrade.java | 6 --- 385 files changed, 1291 insertions(+), 390 deletions(-) diff --git a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java index 2783bf3b30..a6ce035fa9 100644 --- a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java +++ b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java @@ -97,6 +97,7 @@ public ExcludeHandler(Object target) { this.target = target; } + @Override public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { String methodName = method.getName(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java index cf7aafafb7..f1cb41dd6d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -1847,6 +1847,7 @@ public void clear() { * * @return an iterator over the entries. */ + @Override public Iterator> iterator() { // Get a copy of just the string to string pairs. 
After the old object // methods that allow non-strings to be put into configurations are removed, @@ -2272,6 +2273,7 @@ public void readFields(DataInput in) throws IOException { } //@Override + @Override public void write(DataOutput out) throws IOException { Properties props = getProps(); WritableUtils.writeVInt(out, props.size()); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configured.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configured.java index 2bc7e537e4..f06af2b98d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configured.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configured.java @@ -39,11 +39,13 @@ public Configured(Configuration conf) { } // inherit javadoc + @Override public void setConf(Configuration conf) { this.conf = conf; } // inherit javadoc + @Override public Configuration getConf() { return conf; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java index 041b263edd..452d29f7b7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java @@ -23,12 +23,10 @@ import org.apache.commons.lang.StringEscapeUtils; import java.util.Collection; -import java.util.Map; import java.util.Enumeration; import java.io.IOException; import java.io.PrintWriter; -import javax.servlet.ServletContext; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; @@ -57,9 +55,6 @@ public class ReconfigurationServlet extends HttpServlet { public static final String CONF_SERVLET_RECONFIGURABLE_PREFIX = "conf.servlet.reconfigurable."; - /** - * {@inheritDoc} - */ @Override public void init() throws ServletException { super.init(); @@ -202,9 +197,6 @@ private void applyChanges(PrintWriter out, Reconfigurable reconf, } } - /** - * {@inheritDoc} - */ @Override protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { @@ -219,9 +211,6 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp) printFooter(out); } - /** - * {@inheritDoc} - */ @Override protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java index d9eda44580..6adbeab60a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java @@ -47,7 +47,6 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.Progressable; /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AvroFSInput.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AvroFSInput.java index a319fb7b36..b4a4a85674 100644 --- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AvroFSInput.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AvroFSInput.java @@ -45,22 +45,27 @@ public AvroFSInput(final FileContext fc, final Path p) throws IOException { this.stream = fc.open(p); } + @Override public long length() { return len; } + @Override public int read(byte[] b, int off, int len) throws IOException { return stream.read(b, off, len); } + @Override public void seek(long p) throws IOException { stream.seek(p); } + @Override public long tell() throws IOException { return stream.getPos(); } + @Override public void close() throws IOException { stream.close(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java index cfe9ee8c66..fa095343c5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java @@ -204,6 +204,7 @@ public void setTopologyPaths(String[] topologyPaths) throws IOException { } } + @Override public String toString() { StringBuilder result = new StringBuilder(); result.append(offset); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java index f322924012..745148281d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java @@ -19,7 +19,6 @@ import java.io.BufferedInputStream; import java.io.FileDescriptor; -import java.io.FileInputStream; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; @@ -50,10 +49,12 @@ public BufferedFSInputStream(FSInputStream in, int size) { super(in, size); } + @Override public long getPos() throws IOException { return ((FSInputStream)in).getPos()-(count-pos); } + @Override public long skip(long n) throws IOException { if (n <= 0) { return 0; @@ -63,6 +64,7 @@ public long skip(long n) throws IOException { return n; } + @Override public void seek(long pos) throws IOException { if( pos<0 ) { return; @@ -82,20 +84,24 @@ public void seek(long pos) throws IOException { ((FSInputStream)in).seek(pos); } + @Override public boolean seekToNewSource(long targetPos) throws IOException { pos = 0; count = 0; return ((FSInputStream)in).seekToNewSource(targetPos); } + @Override public int read(long position, byte[] buffer, int offset, int length) throws IOException { return ((FSInputStream)in).read(position, buffer, offset, length) ; } + @Override public void readFully(long position, byte[] buffer, int offset, int length) throws IOException { ((FSInputStream)in).readFully(position, buffer, offset, length); } + @Override public void readFully(long position, byte[] buffer) throws IOException { ((FSInputStream)in).readFully(position, buffer); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java index 17707718b8..42ee870268 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java @@ -53,6 +53,7 @@ public ChecksumFileSystem(FileSystem fs) { super(fs); } + @Override public void setConf(Configuration conf) { super.setConf(conf); if (conf != null) { @@ -64,6 +65,7 @@ public void setConf(Configuration conf) { /** * Set whether to verify checksum. */ + @Override public void setVerifyChecksum(boolean verifyChecksum) { this.verifyChecksum = verifyChecksum; } @@ -74,6 +76,7 @@ public void setWriteChecksum(boolean writeChecksum) { } /** get the raw file system */ + @Override public FileSystem getRawFileSystem() { return fs; } @@ -162,14 +165,17 @@ private long getChecksumFilePos( long dataPos ) { return HEADER_LENGTH + 4*(dataPos/bytesPerSum); } + @Override protected long getChunkPosition( long dataPos ) { return dataPos/bytesPerSum*bytesPerSum; } + @Override public int available() throws IOException { return datas.available() + super.available(); } + @Override public int read(long position, byte[] b, int off, int len) throws IOException { // parameter check @@ -190,6 +196,7 @@ public int read(long position, byte[] b, int off, int len) return nread; } + @Override public void close() throws IOException { datas.close(); if( sums != null ) { @@ -290,6 +297,7 @@ private long getFileLength() throws IOException { * @exception IOException if an I/O error occurs. * ChecksumException if the chunk to skip to is corrupted */ + @Override public synchronized long skip(long n) throws IOException { long curPos = getPos(); long fileLength = getFileLength(); @@ -311,6 +319,7 @@ public synchronized long skip(long n) throws IOException { * ChecksumException if the chunk to seek to is corrupted */ + @Override public synchronized void seek(long pos) throws IOException { if(pos>getFileLength()) { throw new IOException("Cannot seek after EOF"); @@ -339,7 +348,7 @@ public FSDataInputStream open(Path f, int bufferSize) throws IOException { return new FSDataBoundedInputStream(fs, f, in); } - /** {@inheritDoc} */ + @Override public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException { throw new IOException("Not supported"); @@ -398,6 +407,7 @@ public ChecksumFSOutputSummer(ChecksumFileSystem fs, sums.writeInt(bytesPerSum); } + @Override public void close() throws IOException { flushBuffer(); sums.close(); @@ -412,7 +422,6 @@ protected void writeChunk(byte[] b, int offset, int len, byte[] checksum) } } - /** {@inheritDoc} */ @Override public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, @@ -454,7 +463,6 @@ private FSDataOutputStream create(Path f, FsPermission permission, return out; } - /** {@inheritDoc} */ @Override public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, @@ -472,6 +480,7 @@ public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, * @return true if successful; * false if file does not exist or is a directory */ + @Override public boolean setReplication(Path src, short replication) throws IOException { boolean value = fs.setReplication(src, replication); if (!value) @@ -487,6 +496,7 @@ public boolean setReplication(Path src, short replication) throws IOException { /** * Rename files/dirs */ + @Override public boolean rename(Path src, Path dst) throws IOException { if (fs.isDirectory(src)) { return fs.rename(src, dst); @@ -516,6 +526,7 @@ public boolean 
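The ChecksumFileSystem hunk above keeps the existing offset math: each bytesPerSum bytes of data are covered by one 4-byte CRC stored after a short header in the hidden .crc file, so getChunkPosition() rounds down to a chunk boundary and getChecksumFilePos() maps a data offset to the matching CRC record. A worked example, illustrative only (HEADER_LENGTH is a stand-in for the real constant):

public class ChecksumOffsetDemo {  // hypothetical demo class
  static final long HEADER_LENGTH = 8;  // assumed header size, for illustration
  static final int bytesPerSum = 512;

  public static void main(String[] args) {
    long dataPos = 1300;
    long chunkPos = dataPos / bytesPerSum * bytesPerSum;        // 1024, start of chunk 2
    long sumPos = HEADER_LENGTH + 4 * (dataPos / bytesPerSum);  // 8 + 4 * 2 = 16
    System.out.println(chunkPos + " " + sumPos);
  }
}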
rename(Path src, Path dst) throws IOException { * Implement the delete(Path, boolean) in checksum * file system. */ + @Override public boolean delete(Path f, boolean recursive) throws IOException{ FileStatus fstatus = null; try { @@ -538,6 +549,7 @@ public boolean delete(Path f, boolean recursive) throws IOException{ } final private static PathFilter DEFAULT_FILTER = new PathFilter() { + @Override public boolean accept(Path file) { return !isChecksumFile(file); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java index 4784991982..12805d86a6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java @@ -32,7 +32,6 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.PureJavaCrc32; -import org.apache.hadoop.util.StringUtils; /** * Abstract Checksumed Fs. @@ -61,6 +60,7 @@ public ChecksumFs(AbstractFileSystem theFs) /** * Set whether to verify checksum. */ + @Override public void setVerifyChecksum(boolean inVerifyChecksum) { this.verifyChecksum = inVerifyChecksum; } @@ -152,14 +152,17 @@ private long getChecksumFilePos(long dataPos) { return HEADER_LENGTH + 4*(dataPos/bytesPerSum); } + @Override protected long getChunkPosition(long dataPos) { return dataPos/bytesPerSum*bytesPerSum; } + @Override public int available() throws IOException { return datas.available() + super.available(); } + @Override public int read(long position, byte[] b, int off, int len) throws IOException, UnresolvedLinkException { // parameter check @@ -180,6 +183,7 @@ public int read(long position, byte[] b, int off, int len) return nread; } + @Override public void close() throws IOException { datas.close(); if (sums != null) { @@ -258,6 +262,7 @@ private long getFileLength() throws IOException, UnresolvedLinkException { * @exception IOException if an I/O error occurs. * ChecksumException if the chunk to skip to is corrupted */ + @Override public synchronized long skip(long n) throws IOException { final long curPos = getPos(); final long fileLength = getFileLength(); @@ -279,6 +284,7 @@ public synchronized long skip(long n) throws IOException { * ChecksumException if the chunk to seek to is corrupted */ + @Override public synchronized void seek(long pos) throws IOException { if (pos>getFileLength()) { throw new IOException("Cannot seek after EOF"); @@ -348,6 +354,7 @@ public ChecksumFSOutputSummer(final ChecksumFs fs, final Path file, sums.writeInt(bytesPerSum); } + @Override public void close() throws IOException { flushBuffer(); sums.close(); @@ -447,6 +454,7 @@ public void renameInternal(Path src, Path dst) * Implement the delete(Path, boolean) in checksum * file system. 
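The DEFAULT_FILTER above is the usual anonymous PathFilter idiom: listings from ChecksumFileSystem hide the hidden checksum sidecar, which Hadoop names ".<name>.crc". An illustrative filter along the same lines, not taken from the patch:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

public class CrcFilterDemo {  // hypothetical demo class
  static final PathFilter NO_CRC = new PathFilter() {
    @Override
    public boolean accept(Path file) {
      String name = file.getName();
      return !(name.startsWith(".") && name.endsWith(".crc"));
    }
  };

  public static void main(String[] args) {
    System.out.println(NO_CRC.accept(new Path("/data/part-00000")));       // true
    System.out.println(NO_CRC.accept(new Path("/data/.part-00000.crc")));  // false
  }
}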
*/ + @Override public boolean delete(Path f, boolean recursive) throws IOException, UnresolvedLinkException { FileStatus fstatus = null; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java index c0ab82de1d..0d685b43e1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java @@ -75,7 +75,7 @@ public ContentSummary( /** Returns (disk) space quota */ public long getSpaceQuota() {return spaceQuota;} - /** {@inheritDoc} */ + @Override @InterfaceAudience.Private public void write(DataOutput out) throws IOException { out.writeLong(length); @@ -86,7 +86,7 @@ public void write(DataOutput out) throws IOException { out.writeLong(spaceQuota); } - /** {@inheritDoc} */ + @Override @InterfaceAudience.Private public void readFields(DataInput in) throws IOException { this.length = in.readLong(); @@ -131,7 +131,7 @@ public static String getHeader(boolean qOption) { return qOption ? QUOTA_HEADER : HEADER; } - /** {@inheritDoc} */ + @Override public String toString() { return toString(true); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java index 9949834222..c552f331f8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java @@ -131,6 +131,7 @@ public String getMount() throws IOException { return mount; } + @Override public String toString() { return "df -k " + mount +"\n" + diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java index 5caec7204d..2c96b0abaf 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java @@ -76,6 +76,7 @@ public DU(File path, Configuration conf) throws IOException { **/ class DURefreshThread implements Runnable { + @Override public void run() { while(shouldRun) { @@ -169,16 +170,19 @@ public void shutdown() { } } + @Override public String toString() { return "du -sk " + dirPath +"\n" + used + "\t" + dirPath; } + @Override protected String[] getExecString() { return new String[] {"du", "-sk", dirPath}; } + @Override protected void parseExecResult(BufferedReader lines) throws IOException { String line = lines.readLine(); if (line == null) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java index e47dffb082..eef53140c3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java @@ -44,6 +44,7 @@ public FSDataInputStream(InputStream in) * * @param desired offset to seek to */ + @Override public synchronized void seek(long desired) throws IOException { ((Seekable)in).seek(desired); } @@ -53,6 +54,7 @@ public synchronized void seek(long desired) throws IOException { * * @return current position in the input stream */ + @Override public 
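DU and DF, annotated above, both follow the Shell template method: getExecString() supplies the command line and parseExecResult() scrapes its output. A rough sketch of the same pattern, assuming Shell.run() remains callable from subclasses; the UptimeCommand class is invented:

import java.io.BufferedReader;
import java.io.IOException;
import org.apache.hadoop.util.Shell;

public class UptimeCommand extends Shell {  // hypothetical example class
  private String result;

  @Override
  protected String[] getExecString() {
    return new String[] {"uptime"};
  }

  @Override
  protected void parseExecResult(BufferedReader lines) throws IOException {
    result = lines.readLine();  // keep the single line of output
  }

  public String execute() throws IOException {
    run();  // inherited from Shell: fork the command, wait, parse stdout
    return result;
  }
}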
long getPos() throws IOException { return ((Seekable)in).getPos(); } @@ -68,6 +70,7 @@ public long getPos() throws IOException { * if there is no more data because the end of the stream has been * reached */ + @Override public int read(long position, byte[] buffer, int offset, int length) throws IOException { return ((PositionedReadable)in).read(position, buffer, offset, length); @@ -85,6 +88,7 @@ public int read(long position, byte[] buffer, int offset, int length) * If an exception is thrown an undetermined number * of bytes in the buffer may have been written. */ + @Override public void readFully(long position, byte[] buffer, int offset, int length) throws IOException { ((PositionedReadable)in).readFully(position, buffer, offset, length); @@ -93,6 +97,7 @@ public void readFully(long position, byte[] buffer, int offset, int length) /** * See {@link #readFully(long, byte[], int, int)}. */ + @Override public void readFully(long position, byte[] buffer) throws IOException { ((PositionedReadable)in).readFully(position, buffer, 0, buffer.length); @@ -104,6 +109,7 @@ public void readFully(long position, byte[] buffer) * @param targetPos position to seek to * @return true if a new source is found, false otherwise */ + @Override public boolean seekToNewSource(long targetPos) throws IOException { return ((Seekable)in).seekToNewSource(targetPos); } @@ -118,6 +124,7 @@ public InputStream getWrappedStream() { return in; } + @Override public int read(ByteBuffer buf) throws IOException { if (in instanceof ByteBufferReadable) { return ((ByteBufferReadable)in).read(buf); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java index 9974f27e24..cc992e7c94 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java @@ -140,6 +140,7 @@ protected synchronized boolean needChecksum() { * @exception IOException if an I/O error occurs. */ + @Override public synchronized int read() throws IOException { if (pos >= count) { fill(); @@ -180,6 +181,7 @@ public synchronized int read() throws IOException { * @exception IOException if an I/O error occurs. * ChecksumException if any checksum error occurs */ + @Override public synchronized int read(byte[] b, int off, int len) throws IOException { // parameter check if ((off | len | (off + len) | (b.length - (off + len))) < 0) { @@ -367,6 +369,7 @@ public synchronized int available() throws IOException { * @exception IOException if an I/O error occurs. 
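The FSDataInputStream methods annotated above split into two access styles: the positional read(position, buffer, offset, length) leaves the stream position alone, while seek() plus read() moves it. A short usage sketch, with a made-up input path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PositionalReadDemo {  // hypothetical demo class
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FSDataInputStream in = fs.open(new Path("/tmp/sample.txt"));  // assumed path
    try {
      byte[] buf = new byte[128];
      int n = in.read(1024L, buf, 0, buf.length);  // pread: getPos() is unchanged
      in.seek(1024L);                              // stateful: moves the stream
      int m = in.read(buf, 0, buf.length);         // reads from the new position
      System.out.println(n + " " + m + " pos=" + in.getPos());
    } finally {
      in.close();
    }
  }
}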
* ChecksumException if the chunk to skip to is corrupted */ + @Override public synchronized long skip(long n) throws IOException { if (n <= 0) { return 0; @@ -389,6 +392,7 @@ public synchronized long skip(long n) throws IOException { * ChecksumException if the chunk to seek to is corrupted */ + @Override public synchronized void seek(long pos) throws IOException { if( pos<0 ) { return; @@ -462,13 +466,16 @@ final protected synchronized void set(boolean verifyChecksum, this.pos = 0; } + @Override final public boolean markSupported() { return false; } + @Override final public void mark(int readlimit) { } + @Override final public void reset() throws IOException { throw new IOException("mark/reset not supported"); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java index f7bc22159d..8d668feeab 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java @@ -36,19 +36,23 @@ public abstract class FSInputStream extends InputStream * The next read() will be from that location. Can't * seek past the end of the file. */ + @Override public abstract void seek(long pos) throws IOException; /** * Return the current offset from the start of the file */ + @Override public abstract long getPos() throws IOException; /** * Seeks a different copy of the data. Returns true if * found a new source, false otherwise. */ + @Override public abstract boolean seekToNewSource(long targetPos) throws IOException; + @Override public int read(long position, byte[] buffer, int offset, int length) throws IOException { synchronized (this) { @@ -64,6 +68,7 @@ public int read(long position, byte[] buffer, int offset, int length) } } + @Override public void readFully(long position, byte[] buffer, int offset, int length) throws IOException { int nread = 0; @@ -76,6 +81,7 @@ public void readFully(long position, byte[] buffer, int offset, int length) } } + @Override public void readFully(long position, byte[] buffer) throws IOException { readFully(position, buffer, 0, buffer.length); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java index 66b6a74916..d494f30de7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java @@ -55,6 +55,7 @@ protected abstract void writeChunk(byte[] b, int offset, int len, byte[] checksu throws IOException; /** Write one byte */ + @Override public synchronized void write(int b) throws IOException { sum.update(b); buf[count++] = (byte)b; @@ -81,6 +82,7 @@ public synchronized void write(int b) throws IOException { * @param len the number of bytes to write. * @exception IOException if an I/O error occurs. 
*/ + @Override public synchronized void write(byte b[], int off, int len) throws IOException { if (off < 0 || len < 0 || off > b.length - len) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileChecksum.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileChecksum.java index 2b248bdcf2..149a3e3a4a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileChecksum.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileChecksum.java @@ -37,6 +37,7 @@ public abstract class FileChecksum implements Writable { public abstract byte[] getBytes(); /** Return true if both the algorithms and the values are the same. */ + @Override public boolean equals(Object other) { if (other == this) { return true; @@ -50,7 +51,7 @@ public boolean equals(Object other) { && Arrays.equals(this.getBytes(), that.getBytes()); } - /** {@inheritDoc} */ + @Override public int hashCode() { return getAlgorithmName().hashCode() ^ Arrays.hashCode(getBytes()); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java index 4e5057a4e9..5cfce9b019 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java @@ -190,6 +190,7 @@ public final class FileContext { new FileContextFinalizer(); private static final PathFilter DEFAULT_FILTER = new PathFilter() { + @Override public boolean accept(final Path file) { return true; } @@ -318,6 +319,7 @@ private static AbstractFileSystem getAbstractFileSystem( throws UnsupportedFileSystemException, IOException { try { return user.doAs(new PrivilegedExceptionAction() { + @Override public AbstractFileSystem run() throws UnsupportedFileSystemException { return AbstractFileSystem.get(uri, conf); } @@ -660,6 +662,7 @@ public FSDataOutputStream create(final Path f, final CreateOpts[] updatedOpts = CreateOpts.setOpt(CreateOpts.perms(permission), opts); return new FSLinkResolver() { + @Override public FSDataOutputStream next(final AbstractFileSystem fs, final Path p) throws IOException { return fs.create(p, createFlag, updatedOpts); @@ -703,6 +706,7 @@ public void mkdir(final Path dir, final FsPermission permission, final FsPermission absFerms = (permission == null ? 
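The FileContext hunk above annotates the doAs callback: filesystem construction runs inside a PrivilegedExceptionAction so it is attributed to the supplied UserGroupInformation. An illustrative sketch of the same idiom, with a made-up username and path:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsDemo {  // hypothetical demo class
  public static void main(String[] args) throws Exception {
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");
    FileStatus st = ugi.doAs(new PrivilegedExceptionAction<FileStatus>() {
      @Override
      public FileStatus run() throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        return fs.getFileStatus(new Path("/tmp"));  // executed as "alice"
      }
    });
    System.out.println(st.getOwner());
  }
}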
FsPermission.getDefault() : permission).applyUMask(umask); new FSLinkResolver() { + @Override public Void next(final AbstractFileSystem fs, final Path p) throws IOException, UnresolvedLinkException { fs.mkdir(p, absFerms, createParent); @@ -738,6 +742,7 @@ public boolean delete(final Path f, final boolean recursive) UnsupportedFileSystemException, IOException { Path absF = fixRelativePart(f); return new FSLinkResolver() { + @Override public Boolean next(final AbstractFileSystem fs, final Path p) throws IOException, UnresolvedLinkException { return Boolean.valueOf(fs.delete(p, recursive)); @@ -766,6 +771,7 @@ public FSDataInputStream open(final Path f) throws AccessControlException, FileNotFoundException, UnsupportedFileSystemException, IOException { final Path absF = fixRelativePart(f); return new FSLinkResolver() { + @Override public FSDataInputStream next(final AbstractFileSystem fs, final Path p) throws IOException, UnresolvedLinkException { return fs.open(p); @@ -796,6 +802,7 @@ public FSDataInputStream open(final Path f, final int bufferSize) UnsupportedFileSystemException, IOException { final Path absF = fixRelativePart(f); return new FSLinkResolver() { + @Override public FSDataInputStream next(final AbstractFileSystem fs, final Path p) throws IOException, UnresolvedLinkException { return fs.open(p, bufferSize); @@ -826,6 +833,7 @@ public boolean setReplication(final Path f, final short replication) IOException { final Path absF = fixRelativePart(f); return new FSLinkResolver() { + @Override public Boolean next(final AbstractFileSystem fs, final Path p) throws IOException, UnresolvedLinkException { return Boolean.valueOf(fs.setReplication(p, replication)); @@ -894,6 +902,7 @@ public void rename(final Path src, final Path dst, */ final Path source = resolveIntermediate(absSrc); new FSLinkResolver() { + @Override public Void next(final AbstractFileSystem fs, final Path p) throws IOException, UnresolvedLinkException { fs.rename(source, p, options); @@ -925,6 +934,7 @@ public void setPermission(final Path f, final FsPermission permission) UnsupportedFileSystemException, IOException { final Path absF = fixRelativePart(f); new FSLinkResolver() { + @Override public Void next(final AbstractFileSystem fs, final Path p) throws IOException, UnresolvedLinkException { fs.setPermission(p, permission); @@ -967,6 +977,7 @@ public void setOwner(final Path f, final String username, } final Path absF = fixRelativePart(f); new FSLinkResolver() { + @Override public Void next(final AbstractFileSystem fs, final Path p) throws IOException, UnresolvedLinkException { fs.setOwner(p, username, groupname); @@ -1002,6 +1013,7 @@ public void setTimes(final Path f, final long mtime, final long atime) UnsupportedFileSystemException, IOException { final Path absF = fixRelativePart(f); new FSLinkResolver() { + @Override public Void next(final AbstractFileSystem fs, final Path p) throws IOException, UnresolvedLinkException { fs.setTimes(p, mtime, atime); @@ -1034,6 +1046,7 @@ public FileChecksum getFileChecksum(final Path f) IOException { final Path absF = fixRelativePart(f); return new FSLinkResolver() { + @Override public FileChecksum next(final AbstractFileSystem fs, final Path p) throws IOException, UnresolvedLinkException { return fs.getFileChecksum(p); @@ -1089,6 +1102,7 @@ public FileStatus getFileStatus(final Path f) throws AccessControlException, FileNotFoundException, UnsupportedFileSystemException, IOException { final Path absF = fixRelativePart(f); return new FSLinkResolver() { + @Override public FileStatus 
next(final AbstractFileSystem fs, final Path p) throws IOException, UnresolvedLinkException { return fs.getFileStatus(p); @@ -1135,6 +1149,7 @@ public FileStatus getFileLinkStatus(final Path f) UnsupportedFileSystemException, IOException { final Path absF = fixRelativePart(f); return new FSLinkResolver() { + @Override public FileStatus next(final AbstractFileSystem fs, final Path p) throws IOException, UnresolvedLinkException { FileStatus fi = fs.getFileLinkStatus(p); @@ -1165,6 +1180,7 @@ public Path getLinkTarget(final Path f) throws AccessControlException, FileNotFoundException, UnsupportedFileSystemException, IOException { final Path absF = fixRelativePart(f); return new FSLinkResolver() { + @Override public Path next(final AbstractFileSystem fs, final Path p) throws IOException, UnresolvedLinkException { FileStatus fi = fs.getFileLinkStatus(p); @@ -1208,6 +1224,7 @@ public BlockLocation[] getFileBlockLocations(final Path f, final long start, UnsupportedFileSystemException, IOException { final Path absF = fixRelativePart(f); return new FSLinkResolver() { + @Override public BlockLocation[] next(final AbstractFileSystem fs, final Path p) throws IOException, UnresolvedLinkException { return fs.getFileBlockLocations(p, start, len); @@ -1246,6 +1263,7 @@ public FsStatus getFsStatus(final Path f) throws AccessControlException, } final Path absF = fixRelativePart(f); return new FSLinkResolver() { + @Override public FsStatus next(final AbstractFileSystem fs, final Path p) throws IOException, UnresolvedLinkException { return fs.getFsStatus(p); @@ -1339,6 +1357,7 @@ public void createSymlink(final Path target, final Path link, IOException { final Path nonRelLink = fixRelativePart(link); new FSLinkResolver() { + @Override public Void next(final AbstractFileSystem fs, final Path p) throws IOException, UnresolvedLinkException { fs.createSymlink(target, p, createParent); @@ -1373,6 +1392,7 @@ public RemoteIterator listStatus(final Path f) throws UnsupportedFileSystemException, IOException { final Path absF = fixRelativePart(f); return new FSLinkResolver>() { + @Override public RemoteIterator next( final AbstractFileSystem fs, final Path p) throws IOException, UnresolvedLinkException { @@ -1432,6 +1452,7 @@ public RemoteIterator listLocatedStatus( UnsupportedFileSystemException, IOException { final Path absF = fixRelativePart(f); return new FSLinkResolver>() { + @Override public RemoteIterator next( final AbstractFileSystem fs, final Path p) throws IOException, UnresolvedLinkException { @@ -1703,6 +1724,7 @@ public FileStatus[] listStatus(final Path f) throws AccessControlException, IOException { final Path absF = fixRelativePart(f); return new FSLinkResolver() { + @Override public FileStatus[] next(final AbstractFileSystem fs, final Path p) throws IOException, UnresolvedLinkException { return fs.listStatus(p); @@ -2232,6 +2254,7 @@ private static boolean isSameFS(Path qualPath1, Path qualPath2) { * Deletes all the paths in deleteOnExit on JVM shutdown. 
*/ static class FileContextFinalizer implements Runnable { + @Override public synchronized void run() { processDeleteOnExit(); } @@ -2244,6 +2267,7 @@ public synchronized void run() { protected Path resolve(final Path f) throws FileNotFoundException, UnresolvedLinkException, AccessControlException, IOException { return new FSLinkResolver() { + @Override public Path next(final AbstractFileSystem fs, final Path p) throws IOException, UnresolvedLinkException { return fs.resolvePath(p); @@ -2259,6 +2283,7 @@ public Path next(final AbstractFileSystem fs, final Path p) */ protected Path resolveIntermediate(final Path f) throws IOException { return new FSLinkResolver() { + @Override public FileStatus next(final AbstractFileSystem fs, final Path p) throws IOException, UnresolvedLinkException { return fs.getFileLinkStatus(p); @@ -2281,6 +2306,7 @@ Set resolveAbstractFileSystems(final Path f) final HashSet result = new HashSet(); new FSLinkResolver() { + @Override public Void next(final AbstractFileSystem fs, final Path p) throws IOException, UnresolvedLinkException { result.add(fs); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java index 2757475faf..5445f6eb15 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java @@ -253,6 +253,7 @@ public void setSymlink(final Path p) { ////////////////////////////////////////////////// // Writable ////////////////////////////////////////////////// + @Override public void write(DataOutput out) throws IOException { Text.writeString(out, getPath().toString(), Text.DEFAULT_MAX_LEN); out.writeLong(getLen()); @@ -270,6 +271,7 @@ public void write(DataOutput out) throws IOException { } } + @Override public void readFields(DataInput in) throws IOException { String strPath = Text.readString(in, Text.DEFAULT_MAX_LEN); this.path = new Path(strPath); @@ -299,6 +301,7 @@ public void readFields(DataInput in) throws IOException { * @throws ClassCastException if the specified object's is not of * type FileStatus */ + @Override public int compareTo(Object o) { FileStatus other = (FileStatus)o; return this.getPath().compareTo(other.getPath()); @@ -308,6 +311,7 @@ public int compareTo(Object o) { * @param o the object to be compared. * @return true if two file status has the same path name; false if not. */ + @Override public boolean equals(Object o) { if (o == null) { return false; @@ -328,6 +332,7 @@ public boolean equals(Object o) { * * @return a hash code value for the path name. 
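FileStatus, annotated above, keys compareTo(), equals() and hashCode() on the path alone, so instances that compare equal also hash alike and order consistently in sorted collections. A simplified stand-in showing that contract (the PathKeyed class is invented):

public class PathKeyed implements Comparable<PathKeyed> {  // hypothetical example class
  private final String path;

  public PathKeyed(String path) {
    this.path = path;
  }

  @Override
  public int compareTo(PathKeyed other) {
    return path.compareTo(other.path);
  }

  @Override
  public boolean equals(Object o) {
    return o instanceof PathKeyed && path.equals(((PathKeyed) o).path);
  }

  @Override
  public int hashCode() {
    return path.hashCode();
  }
}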
*/ + @Override public int hashCode() { return getPath().hashCode(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index 31b59439a9..ff9f2db1ff 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -147,6 +147,7 @@ public static FileSystem get(final URI uri, final Configuration conf, UserGroupInformation ugi = UserGroupInformation.getBestUGI(ticketCachePath, user); return ugi.doAs(new PrivilegedExceptionAction() { + @Override public FileSystem run() throws IOException { return get(uri, conf); } @@ -332,6 +333,7 @@ public static FileSystem newInstance(final URI uri, final Configuration conf, UserGroupInformation ugi = UserGroupInformation.getBestUGI(ticketCachePath, user); return ugi.doAs(new PrivilegedExceptionAction() { + @Override public FileSystem run() throws IOException { return newInstance(uri,conf); } @@ -1389,6 +1391,7 @@ public ContentSummary getContentSummary(Path f) throws IOException { } final private static PathFilter DEFAULT_FILTER = new PathFilter() { + @Override public boolean accept(Path file) { return true; } @@ -2056,6 +2059,7 @@ public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile) * No more filesystem operations are needed. Will * release any held locks. */ + @Override public void close() throws IOException { // delete all files that were marked as delete-on-exit. processDeleteOnExit(); @@ -2393,6 +2397,7 @@ synchronized void closeAll(boolean onlyAutomatic) throws IOException { } private class ClientFinalizer implements Runnable { + @Override public synchronized void run() { try { closeAll(true); @@ -2447,7 +2452,7 @@ static class Key { this.ugi = UserGroupInformation.getCurrentUser(); } - /** {@inheritDoc} */ + @Override public int hashCode() { return (scheme + authority).hashCode() + ugi.hashCode() + (int)unique; } @@ -2456,7 +2461,7 @@ static boolean isEqual(Object a, Object b) { return a == b || (a != null && a.equals(b)); } - /** {@inheritDoc} */ + @Override public boolean equals(Object obj) { if (obj == this) { return true; @@ -2471,7 +2476,7 @@ && isEqual(this.ugi, that.ugi) return false; } - /** {@inheritDoc} */ + @Override public String toString() { return "("+ugi.toString() + ")@" + scheme + "://" + authority; } @@ -2584,6 +2589,7 @@ public int getWriteOps() { return writeOps.get(); } + @Override public String toString() { return bytesRead + " bytes read, " + bytesWritten + " bytes written, " + readOps + " read ops, " + largeReadOps + " large read ops, " diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java index ba9bb4eafe..b6a2acae49 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java @@ -414,9 +414,11 @@ private static class CygPathCommand extends Shell { String getResult() throws IOException { return result; } + @Override protected String[] getExecString() { return command; } + @Override protected void parseExecResult(BufferedReader lines) throws IOException { String line = lines.readLine(); if (line == null) { diff --git 
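FileSystem.get(uri, conf, user), shown above, wraps the same doAs machinery behind a convenience method, returning a FileSystem whose operations are performed as the named user. A usage sketch with placeholder URI and username:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetAsUserDemo {  // hypothetical demo class
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(new URI("hdfs://namenode:8020"), conf, "alice");
    System.out.println(fs.exists(new Path("/user/alice")));
    fs.close();  // close() also runs the delete-on-exit processing shown above
  }
}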
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java index c2ecd20b5a..6e1e099cb0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java @@ -76,6 +76,7 @@ public FileSystem getRawFileSystem() { * for this FileSystem * @param conf the configuration */ + @Override public void initialize(URI name, Configuration conf) throws IOException { super.initialize(name, conf); // this is less than ideal, but existing filesystems sometimes neglect @@ -90,6 +91,7 @@ public void initialize(URI name, Configuration conf) throws IOException { } /** Returns a URI whose scheme and authority identify this FileSystem.*/ + @Override public URI getUri() { return fs.getUri(); } @@ -104,6 +106,7 @@ protected URI getCanonicalUri() { } /** Make sure that a path specifies a FileSystem. */ + @Override public Path makeQualified(Path path) { Path fqPath = fs.makeQualified(path); // swap in our scheme if the filtered fs is using a different scheme @@ -125,10 +128,12 @@ public Path makeQualified(Path path) { /////////////////////////////////////////////////////////////// /** Check that a Path belongs to this FileSystem. */ + @Override protected void checkPath(Path path) { fs.checkPath(path); } + @Override public BlockLocation[] getFileBlockLocations(FileStatus file, long start, long len) throws IOException { return fs.getFileBlockLocations(file, start, len); @@ -143,17 +148,17 @@ public Path resolvePath(final Path p) throws IOException { * @param f the file name to open * @param bufferSize the size of the buffer to be used. */ + @Override public FSDataInputStream open(Path f, int bufferSize) throws IOException { return fs.open(f, bufferSize); } - /** {@inheritDoc} */ + @Override public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException { return fs.append(f, bufferSize, progress); } - /** {@inheritDoc} */ @Override public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, @@ -171,6 +176,7 @@ public FSDataOutputStream create(Path f, FsPermission permission, * @return true if successful; * false if file does not exist or is a directory */ + @Override public boolean setReplication(Path src, short replication) throws IOException { return fs.setReplication(src, replication); } @@ -179,23 +185,23 @@ public boolean setReplication(Path src, short replication) throws IOException { * Renames Path src to Path dst. Can take place on local fs * or remote DFS. */ + @Override public boolean rename(Path src, Path dst) throws IOException { return fs.rename(src, dst); } /** Delete a file */ + @Override public boolean delete(Path f, boolean recursive) throws IOException { return fs.delete(f, recursive); } /** List files in a directory. */ + @Override public FileStatus[] listStatus(Path f) throws IOException { return fs.listStatus(f); } - /** - * {@inheritDoc} - */ @Override public RemoteIterator listCorruptFileBlocks(Path path) throws IOException { @@ -203,11 +209,13 @@ public RemoteIterator listCorruptFileBlocks(Path path) } /** List files and its block locations in a directory. 
*/ + @Override public RemoteIterator listLocatedStatus(Path f) throws IOException { return fs.listLocatedStatus(f); } + @Override public Path getHomeDirectory() { return fs.getHomeDirectory(); } @@ -219,6 +227,7 @@ public Path getHomeDirectory() { * * @param newDir */ + @Override public void setWorkingDirectory(Path newDir) { fs.setWorkingDirectory(newDir); } @@ -228,21 +237,21 @@ public void setWorkingDirectory(Path newDir) { * * @return the directory pathname */ + @Override public Path getWorkingDirectory() { return fs.getWorkingDirectory(); } + @Override protected Path getInitialWorkingDirectory() { return fs.getInitialWorkingDirectory(); } - /** {@inheritDoc} */ @Override public FsStatus getStatus(Path p) throws IOException { return fs.getStatus(p); } - /** {@inheritDoc} */ @Override public boolean mkdirs(Path f, FsPermission permission) throws IOException { return fs.mkdirs(f, permission); @@ -254,6 +263,7 @@ public boolean mkdirs(Path f, FsPermission permission) throws IOException { * the given dst name. * delSrc indicates if the source should be removed */ + @Override public void copyFromLocalFile(boolean delSrc, Path src, Path dst) throws IOException { fs.copyFromLocalFile(delSrc, src, dst); @@ -264,6 +274,7 @@ public void copyFromLocalFile(boolean delSrc, Path src, Path dst) * the given dst name. * delSrc indicates if the source should be removed */ + @Override public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path[] srcs, Path dst) throws IOException { @@ -275,6 +286,7 @@ public void copyFromLocalFile(boolean delSrc, boolean overwrite, * the given dst name. * delSrc indicates if the source should be removed */ + @Override public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException { @@ -286,6 +298,7 @@ public void copyFromLocalFile(boolean delSrc, boolean overwrite, * Copy it from FS control to the local dst name. * delSrc indicates if the src will be removed or not. */ + @Override public void copyToLocalFile(boolean delSrc, Path src, Path dst) throws IOException { fs.copyToLocalFile(delSrc, src, dst); @@ -297,6 +310,7 @@ public void copyToLocalFile(boolean delSrc, Path src, Path dst) * file. If the FS is local, we write directly into the target. If * the FS is remote, we write into the tmp local area. */ + @Override public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile) throws IOException { return fs.startLocalOutput(fsOutputFile, tmpLocalFile); @@ -308,12 +322,14 @@ public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile) * FS will copy the contents of tmpLocalFile to the correct target at * fsOutputFile. */ + @Override public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile) throws IOException { fs.completeLocalOutput(fsOutputFile, tmpLocalFile); } /** Return the total size of all files in the filesystem.*/ + @Override public long getUsed() throws IOException{ return fs.getUsed(); } @@ -357,16 +373,17 @@ public FsServerDefaults getServerDefaults(Path f) throws IOException { /** * Get file status. 
*/ + @Override public FileStatus getFileStatus(Path f) throws IOException { return fs.getFileStatus(f); } - /** {@inheritDoc} */ + @Override public FileChecksum getFileChecksum(Path f) throws IOException { return fs.getFileChecksum(f); } - /** {@inheritDoc} */ + @Override public void setVerifyChecksum(boolean verifyChecksum) { fs.setVerifyChecksum(verifyChecksum); } @@ -387,21 +404,18 @@ public void close() throws IOException { fs.close(); } - /** {@inheritDoc} */ @Override public void setOwner(Path p, String username, String groupname ) throws IOException { fs.setOwner(p, username, groupname); } - /** {@inheritDoc} */ @Override public void setTimes(Path p, long mtime, long atime ) throws IOException { fs.setTimes(p, mtime, atime); } - /** {@inheritDoc} */ @Override public void setPermission(Path p, FsPermission permission ) throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java index 6cfc11b1fa..9637b6b913 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java @@ -174,9 +174,6 @@ public FileStatus[] listStatus(Path f) return myFs.listStatus(f); } - /** - * {@inheritDoc} - */ @Override public RemoteIterator listCorruptFileBlocks(Path path) throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java index 637697b83d..c1b9071bbc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java @@ -39,6 +39,7 @@ public class FsServerDefaults implements Writable { static { // register a ctor WritableFactories.setFactory(FsServerDefaults.class, new WritableFactory() { + @Override public Writable newInstance() { return new FsServerDefaults(); } @@ -106,6 +107,7 @@ public DataChecksum.Type getChecksumType() { // ///////////////////////////////////////// // Writable // ///////////////////////////////////////// + @Override @InterfaceAudience.Private public void write(DataOutput out) throws IOException { out.writeLong(blockSize); @@ -116,6 +118,7 @@ public void write(DataOutput out) throws IOException { WritableUtils.writeEnum(out, checksumType); } + @Override @InterfaceAudience.Private public void readFields(DataInput in) throws IOException { blockSize = in.readLong(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java index 4da32789e5..0db1f9e431 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java @@ -236,6 +236,7 @@ private void printInstanceHelp(PrintStream out, Command instance) { /** * run */ + @Override public int run(String argv[]) throws Exception { // initialize FsShell init(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsStatus.java index 8b9de78fe0..d392c7d765 100644 --- 
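FilterFileSystem, whose remaining overrides appear above, is a plain delegating decorator, so behavior can be layered by overriding individual operations and letting everything else fall through. An invented example, not part of the patch:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.Path;

public class LoggingFileSystem extends FilterFileSystem {  // hypothetical example class
  public LoggingFileSystem(FileSystem fs) {
    super(fs);
  }

  @Override
  public boolean delete(Path f, boolean recursive) throws IOException {
    System.err.println("delete " + f + " recursive=" + recursive);
    return super.delete(f, recursive);  // every other call still delegates to 'fs'
  }
}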
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsStatus.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsStatus.java @@ -60,12 +60,14 @@ public long getRemaining() { ////////////////////////////////////////////////// // Writable ////////////////////////////////////////////////// + @Override public void write(DataOutput out) throws IOException { out.writeLong(capacity); out.writeLong(used); out.writeLong(remaining); } + @Override public void readFields(DataInput in) throws IOException { capacity = in.readLong(); used = in.readLong(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java index 65c608ddec..90e75b0ccb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java @@ -53,7 +53,6 @@ public void connect() throws IOException { } } - /* @inheritDoc */ @Override public InputStream getInputStream() throws IOException { if (is == null) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java index b9a5f1a2cc..2a9208ea5b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java @@ -59,6 +59,7 @@ public FsUrlStreamHandlerFactory(Configuration conf) { this.handler = new FsUrlStreamHandler(this.conf); } + @Override public java.net.URLStreamHandler createURLStreamHandler(String protocol) { if (!protocols.containsKey(protocol)) { boolean known = true; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobFilter.java index 5afa9e911d..24bff5f9cf 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobFilter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobFilter.java @@ -31,6 +31,7 @@ @InterfaceStability.Evolving public class GlobFilter implements PathFilter { private final static PathFilter DEFAULT_FILTER = new PathFilter() { + @Override public boolean accept(Path file) { return true; } @@ -75,6 +76,7 @@ boolean hasPattern() { return pattern.hasWildcard(); } + @Override public boolean accept(Path path) { return pattern.matches(path.getName()) && userFilter.accept(path); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java index 8e03fc35a9..9504e1fda6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java @@ -106,6 +106,7 @@ public HarFileSystem(FileSystem fs) { * har:///archivepath. This assumes the underlying filesystem * to be used in case not specified. 
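FsUrlStreamHandlerFactory, annotated above, exists so that plain java.net.URL can resolve Hadoop filesystem schemes once the factory is registered; the JVM accepts such a factory at most once. A usage sketch with a placeholder URI:

import java.io.InputStream;
import java.net.URL;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;

public class UrlHandlerDemo {  // hypothetical demo class
  public static void main(String[] args) throws Exception {
    URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory(new Configuration()));
    InputStream in = new URL("hdfs://namenode:8020/tmp/sample.txt").openStream();
    try {
      System.out.println(in.read());  // first byte of the file, or -1 if empty
    } finally {
      in.close();
    }
  }
}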
*/ + @Override public void initialize(URI name, Configuration conf) throws IOException { // decode the name URI underLyingURI = decodeHarURI(name, conf); @@ -247,6 +248,7 @@ private String decodeFileName(String fname) /** * return the top level archive. */ + @Override public Path getWorkingDirectory() { return new Path(uri.toString()); } @@ -636,6 +638,7 @@ private HarStatus getFileHarStatus(Path f) throws IOException { /** * @return null since no checksum algorithm is implemented. */ + @Override public FileChecksum getFileChecksum(Path f) { return null; } @@ -668,6 +671,7 @@ public FSDataOutputStream create(Path f, int bufferSize) throw new IOException("Har: Create not allowed"); } + @Override public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, @@ -735,10 +739,12 @@ public FileStatus[] listStatus(Path f) throws IOException { /** * return the top level archive path. */ + @Override public Path getHomeDirectory() { return new Path(uri.toString()); } + @Override public void setWorkingDirectory(Path newDir) { //does nothing. } @@ -746,6 +752,7 @@ public void setWorkingDirectory(Path newDir) { /** * not implemented. */ + @Override public boolean mkdirs(Path f, FsPermission permission) throws IOException { throw new IOException("Har: mkdirs not allowed"); } @@ -753,6 +760,7 @@ public boolean mkdirs(Path f, FsPermission permission) throws IOException { /** * not implemented. */ + @Override public void copyFromLocalFile(boolean delSrc, Path src, Path dst) throws IOException { throw new IOException("Har: copyfromlocalfile not allowed"); @@ -761,6 +769,7 @@ public void copyFromLocalFile(boolean delSrc, Path src, Path dst) throws /** * copies the file in the har filesystem to a local file. */ + @Override public void copyToLocalFile(boolean delSrc, Path src, Path dst) throws IOException { FileUtil.copy(this, src, getLocal(getConf()), dst, false, getConf()); @@ -769,6 +778,7 @@ public void copyToLocalFile(boolean delSrc, Path src, Path dst) /** * not implemented. */ + @Override public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile) throws IOException { throw new IOException("Har: startLocalOutput not allowed"); @@ -777,6 +787,7 @@ public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile) /** * not implemented. */ + @Override public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile) throws IOException { throw new IOException("Har: completeLocalOutput not allowed"); @@ -785,6 +796,7 @@ public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile) /** * not implemented. */ + @Override public void setOwner(Path p, String username, String groupname) throws IOException { throw new IOException("Har: setowner not allowed"); @@ -793,6 +805,7 @@ public void setOwner(Path p, String username, String groupname) /** * Not implemented. 
*/ + @Override public void setPermission(Path p, FsPermission permisssion) throws IOException { throw new IOException("Har: setPermission not allowed"); @@ -825,6 +838,7 @@ private static class HarFsInputStream extends FSInputStream { this.end = start + length; } + @Override public synchronized int available() throws IOException { long remaining = end - underLyingStream.getPos(); if (remaining > (long)Integer.MAX_VALUE) { @@ -833,6 +847,7 @@ public synchronized int available() throws IOException { return (int) remaining; } + @Override public synchronized void close() throws IOException { underLyingStream.close(); super.close(); @@ -847,15 +862,18 @@ public void mark(int readLimit) { /** * reset is not implemented */ + @Override public void reset() throws IOException { throw new IOException("reset not implemented."); } + @Override public synchronized int read() throws IOException { int ret = read(oneBytebuff, 0, 1); return (ret <= 0) ? -1: (oneBytebuff[0] & 0xff); } + @Override public synchronized int read(byte[] b) throws IOException { int ret = read(b, 0, b.length); if (ret != -1) { @@ -867,6 +885,7 @@ public synchronized int read(byte[] b) throws IOException { /** * */ + @Override public synchronized int read(byte[] b, int offset, int len) throws IOException { int newlen = len; @@ -882,6 +901,7 @@ public synchronized int read(byte[] b, int offset, int len) return ret; } + @Override public synchronized long skip(long n) throws IOException { long tmpN = n; if (tmpN > 0) { @@ -895,10 +915,12 @@ public synchronized long skip(long n) throws IOException { return (tmpN < 0)? -1 : 0; } + @Override public synchronized long getPos() throws IOException { return (position - start); } + @Override public synchronized void seek(long pos) throws IOException { if (pos < 0 || (start + pos > end)) { throw new IOException("Failed to seek: EOF"); @@ -907,6 +929,7 @@ public synchronized void seek(long pos) throws IOException { underLyingStream.seek(position); } + @Override public boolean seekToNewSource(long targetPos) throws IOException { //do not need to implement this // hdfs in itself does seektonewsource @@ -917,6 +940,7 @@ public boolean seekToNewSource(long targetPos) throws IOException { /** * implementing position readable. */ + @Override public int read(long pos, byte[] b, int offset, int length) throws IOException { int nlength = length; @@ -929,6 +953,7 @@ public int read(long pos, byte[] b, int offset, int length) /** * position readable again. */ + @Override public void readFully(long pos, byte[] b, int offset, int length) throws IOException { if (start + length + pos > end) { @@ -937,6 +962,7 @@ public void readFully(long pos, byte[] b, int offset, int length) underLyingStream.readFully(pos + start, b, offset, length); } + @Override public void readFully(long pos, byte[] b) throws IOException { readFully(pos, b, 0, b.length); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java index 394c01f705..7db348c557 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java @@ -91,6 +91,7 @@ public void copyToLocalFile(boolean delSrc, Path src, Path dst) * Moves files to a bad file directory on the same device, so that their * storage will not be reused. 
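HarFsInputStream, annotated above, exposes a window [start, end) of an underlying part file, so logical offsets are shifted by start and bounds-checked against end (getPos() returns position - start, and seek() rejects start + pos > end). A worked example of that translation, illustrative only:

public class BoundedOffsetDemo {  // hypothetical demo class
  public static void main(String[] args) {
    long start = 4096;
    long length = 1000;
    long end = start + length;
    long logicalPos = 200;                    // offset the caller asks to seek to
    long underlyingPos = start + logicalPos;  // 4296, where the part file is positioned
    boolean valid = logicalPos >= 0 && underlyingPos <= end;
    System.out.println(underlyingPos + " valid=" + valid);
  }
}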
*/ + @Override public boolean reportChecksumFailure(Path p, FSDataInputStream in, long inPos, FSDataInputStream sums, long sumsPos) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java index b0779ed82f..01368944a4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java @@ -94,6 +94,7 @@ public BlockLocation[] getBlockLocations() { * @throws ClassCastException if the specified object's is not of * type FileStatus */ + @Override public int compareTo(Object o) { return super.compareTo(o); } @@ -102,6 +103,7 @@ public int compareTo(Object o) { * @param o the object to be compared. * @return true if two file status has the same path name; false if not. */ + @Override public boolean equals(Object o) { return super.equals(o); } @@ -112,6 +114,7 @@ public boolean equals(Object o) { * * @return a hash code value for the path name. */ + @Override public int hashCode() { return super.hashCode(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java index 1c697b7f52..5bddb96f0c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java @@ -57,7 +57,7 @@ public MD5MD5CRC32FileChecksum(int bytesPerCRC, long crcPerBlock, MD5Hash md5) { this.md5 = md5; } - /** {@inheritDoc} */ + @Override public String getAlgorithmName() { return "MD5-of-" + crcPerBlock + "MD5-of-" + bytesPerCRC + getCrcType().name(); @@ -73,11 +73,11 @@ public static DataChecksum.Type getCrcTypeFromAlgorithmName(String algorithm) throw new IOException("Unknown checksum type in " + algorithm); } - - /** {@inheritDoc} */ + + @Override public int getLength() {return LENGTH;} - - /** {@inheritDoc} */ + + @Override public byte[] getBytes() { return WritableUtils.toByteArray(this); } @@ -92,14 +92,14 @@ public ChecksumOpt getChecksumOpt() { return new ChecksumOpt(getCrcType(), bytesPerCRC); } - /** {@inheritDoc} */ + @Override public void readFields(DataInput in) throws IOException { bytesPerCRC = in.readInt(); crcPerBlock = in.readLong(); md5 = MD5Hash.read(in); } - - /** {@inheritDoc} */ + + @Override public void write(DataOutput out) throws IOException { out.writeInt(bytesPerCRC); out.writeLong(crcPerBlock); @@ -161,8 +161,8 @@ public static MD5MD5CRC32FileChecksum valueOf(Attributes attrs + ", md5=" + md5, e); } } - - /** {@inheritDoc} */ + + @Override public String toString() { return getAlgorithmName() + ":" + md5; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java index 173e16ea41..8464e51270 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java @@ -22,7 +22,6 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.Progressable; -import org.apache.hadoop.HadoopIllegalArgumentException; 
/** * This class contains options related to file system operations. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java index 74c85af48b..c0ebebfe67 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java @@ -261,6 +261,7 @@ public Path suffix(String suffix) { return new Path(getParent(), getName()+suffix); } + @Override public String toString() { // we can't use uri.toString(), which escapes everything, because we want // illegal characters unescaped in the string, for glob processing, etc. @@ -289,6 +290,7 @@ public String toString() { return buffer.toString(); } + @Override public boolean equals(Object o) { if (!(o instanceof Path)) { return false; @@ -297,10 +299,12 @@ public boolean equals(Object o) { return this.uri.equals(that.uri); } + @Override public int hashCode() { return uri.hashCode(); } + @Override public int compareTo(Object o) { Path that = (Path)o; return this.uri.compareTo(that.uri); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java index 38e991480a..b33b1a778f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java @@ -72,8 +72,10 @@ public File pathToFile(Path path) { return new File(path.toUri().getPath()); } + @Override public URI getUri() { return NAME; } + @Override public void initialize(URI uri, Configuration conf) throws IOException { super.initialize(uri, conf); setConf(conf); @@ -84,6 +86,7 @@ public TrackingFileInputStream(File f) throws IOException { super(f); } + @Override public int read() throws IOException { int result = super.read(); if (result != -1) { @@ -92,6 +95,7 @@ public int read() throws IOException { return result; } + @Override public int read(byte[] data) throws IOException { int result = super.read(data); if (result != -1) { @@ -100,6 +104,7 @@ public int read(byte[] data) throws IOException { return result; } + @Override public int read(byte[] data, int offset, int length) throws IOException { int result = super.read(data, offset, length); if (result != -1) { @@ -120,15 +125,18 @@ public LocalFSFileInputStream(Path f) throws IOException { this.fis = new TrackingFileInputStream(pathToFile(f)); } + @Override public void seek(long pos) throws IOException { fis.getChannel().position(pos); this.position = pos; } + @Override public long getPos() throws IOException { return this.position; } + @Override public boolean seekToNewSource(long targetPos) throws IOException { return false; } @@ -136,11 +144,14 @@ public boolean seekToNewSource(long targetPos) throws IOException { /* * Just forward to the fis */ + @Override public int available() throws IOException { return fis.available(); } + @Override public void close() throws IOException { fis.close(); } @Override public boolean markSupported() { return false; } + @Override public int read() throws IOException { try { int value = fis.read(); @@ -153,6 +164,7 @@ public int read() throws IOException { } } + @Override public int read(byte[] b, int off, int len) throws IOException { try { int value = fis.read(b, off, len); @@ -165,6 +177,7 @@ public int 
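TrackingFileInputStream in the RawLocalFileSystem hunk above decorates FileInputStream purely to count bytes read into the filesystem statistics. The same idiom as a self-contained FilterInputStream, illustrative only:

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.atomic.AtomicLong;

public class CountingInputStream extends FilterInputStream {  // hypothetical example class
  private final AtomicLong bytesRead;

  public CountingInputStream(InputStream in, AtomicLong bytesRead) {
    super(in);
    this.bytesRead = bytesRead;
  }

  @Override
  public int read() throws IOException {
    int b = super.read();
    if (b != -1) {
      bytesRead.incrementAndGet();  // one byte consumed, mirrors incrementBytesRead(1)
    }
    return b;
  }

  @Override
  public int read(byte[] buf, int off, int len) throws IOException {
    int n = super.read(buf, off, len);
    if (n > 0) {
      bytesRead.addAndGet(n);  // count only bytes actually returned
    }
    return n;
  }
}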
read(byte[] b, int off, int len) throws IOException { } } + @Override public int read(long position, byte[] b, int off, int len) throws IOException { ByteBuffer bb = ByteBuffer.wrap(b, off, len); @@ -175,6 +188,7 @@ public int read(long position, byte[] b, int off, int len) } } + @Override public long skip(long n) throws IOException { long value = fis.skip(n); if (value > 0) { @@ -189,6 +203,7 @@ public FileDescriptor getFileDescriptor() throws IOException { } } + @Override public FSDataInputStream open(Path f, int bufferSize) throws IOException { if (!exists(f)) { throw new FileNotFoundException(f.toString()); @@ -210,8 +225,11 @@ private LocalFSFileOutputStream(Path f, boolean append) throws IOException { /* * Just forward to the fos */ + @Override public void close() throws IOException { fos.close(); } + @Override public void flush() throws IOException { fos.flush(); } + @Override public void write(byte[] b, int off, int len) throws IOException { try { fos.write(b, off, len); @@ -220,6 +238,7 @@ public void write(byte[] b, int off, int len) throws IOException { } } + @Override public void write(int b) throws IOException { try { fos.write(b); @@ -229,7 +248,7 @@ public void write(int b) throws IOException { } } - /** {@inheritDoc} */ + @Override public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException { if (!exists(f)) { @@ -242,7 +261,6 @@ public FSDataOutputStream append(Path f, int bufferSize, new LocalFSFileOutputStream(f, true), bufferSize), statistics); } - /** {@inheritDoc} */ @Override public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) @@ -264,7 +282,6 @@ private FSDataOutputStream create(Path f, boolean overwrite, new LocalFSFileOutputStream(f, false), bufferSize), statistics); } - /** {@inheritDoc} */ @Override public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, @@ -276,7 +293,6 @@ public FSDataOutputStream create(Path f, FsPermission permission, return out; } - /** {@inheritDoc} */ @Override public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, boolean overwrite, @@ -288,6 +304,7 @@ public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, return out; } + @Override public boolean rename(Path src, Path dst) throws IOException { if (pathToFile(src).renameTo(pathToFile(dst))) { return true; @@ -302,6 +319,7 @@ public boolean rename(Path src, Path dst) throws IOException { * @return true if the file or directory and all its contents were deleted * @throws IOException if p is non-empty and recursive is false */ + @Override public boolean delete(Path p, boolean recursive) throws IOException { File f = pathToFile(p); if (f.isFile()) { @@ -319,6 +337,7 @@ public boolean delete(Path p, boolean recursive) throws IOException { * (Note: Returned list is not sorted in any given order, * due to reliance on Java's {@link File#list()} API.) */ + @Override public FileStatus[] listStatus(Path f) throws IOException { File localf = pathToFile(f); FileStatus[] results; @@ -356,6 +375,7 @@ public FileStatus[] listStatus(Path f) throws IOException { * Creates the specified directory hierarchy. Does not * treat existence as an error. 
*/ + @Override public boolean mkdirs(Path f) throws IOException { if(f == null) { throw new IllegalArgumentException("mkdirs path arg is null"); @@ -373,7 +393,6 @@ public boolean mkdirs(Path f) throws IOException { (p2f.mkdir() || p2f.isDirectory()); } - /** {@inheritDoc} */ @Override public boolean mkdirs(Path f, FsPermission permission) throws IOException { boolean b = mkdirs(f); @@ -418,7 +437,6 @@ protected Path getInitialWorkingDirectory() { return this.makeQualified(new Path(System.getProperty("user.dir"))); } - /** {@inheritDoc} */ @Override public FsStatus getStatus(Path p) throws IOException { File partition = pathToFile(p == null ? new Path("/") : p); @@ -430,29 +448,35 @@ public FsStatus getStatus(Path p) throws IOException { } // In the case of the local filesystem, we can just rename the file. + @Override public void moveFromLocalFile(Path src, Path dst) throws IOException { rename(src, dst); } // We can write output directly to the final location + @Override public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile) throws IOException { return fsOutputFile; } // It's in the right place - nothing to do. + @Override public void completeLocalOutput(Path fsWorkingFile, Path tmpLocalFile) throws IOException { } + @Override public void close() throws IOException { super.close(); } + @Override public String toString() { return "LocalFS"; } + @Override public FileStatus getFileStatus(Path f) throws IOException { File path = pathToFile(f); if (path.exists()) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java index 07870df1a6..1820c6619e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java @@ -263,6 +263,7 @@ private class Emptier implements Runnable { } } + @Override public void run() { if (emptierInterval == 0) return; // trash disabled diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java index 1c19ce27fb..99ca4fbb80 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java @@ -262,6 +262,7 @@ public void close() throws IOException { } /** This optional operation is not yet supported. */ + @Override public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException { throw new IOException("Not supported"); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java index d3ac019a94..beea508d5d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java @@ -51,19 +51,23 @@ public FTPInputStream(InputStream stream, FTPClient client, this.closed = false; } + @Override public long getPos() throws IOException { return pos; } // We don't support seek. 
+ @Override public void seek(long pos) throws IOException { throw new IOException("Seek not supported"); } + @Override public boolean seekToNewSource(long targetPos) throws IOException { throw new IOException("Seek not supported"); } + @Override public synchronized int read() throws IOException { if (closed) { throw new IOException("Stream closed"); @@ -79,6 +83,7 @@ public synchronized int read() throws IOException { return byteRead; } + @Override public synchronized int read(byte buf[], int off, int len) throws IOException { if (closed) { throw new IOException("Stream closed"); @@ -95,6 +100,7 @@ public synchronized int read(byte buf[], int off, int len) throws IOException { return result; } + @Override public synchronized void close() throws IOException { if (closed) { throw new IOException("Stream closed"); @@ -116,14 +122,17 @@ public synchronized void close() throws IOException { // Not supported. + @Override public boolean markSupported() { return false; } + @Override public void mark(int readLimit) { // Do nothing } + @Override public void reset() throws IOException { throw new IOException("Mark not supported"); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSImpl.java index 88b28ed434..0d77a78c87 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSImpl.java @@ -50,22 +50,27 @@ public KFSImpl(String metaServerHost, int metaServerPort, statistics = stats; } + @Override public boolean exists(String path) throws IOException { return kfsAccess.kfs_exists(path); } + @Override public boolean isDirectory(String path) throws IOException { return kfsAccess.kfs_isDirectory(path); } + @Override public boolean isFile(String path) throws IOException { return kfsAccess.kfs_isFile(path); } + @Override public String[] readdir(String path) throws IOException { return kfsAccess.kfs_readdir(path); } + @Override public FileStatus[] readdirplus(Path path) throws IOException { String srep = path.toUri().getPath(); KfsFileAttr[] fattr = kfsAccess.kfs_readdirplus(srep); @@ -100,52 +105,64 @@ public FileStatus[] readdirplus(Path path) throws IOException { } + @Override public int mkdirs(String path) throws IOException { return kfsAccess.kfs_mkdirs(path); } + @Override public int rename(String source, String dest) throws IOException { return kfsAccess.kfs_rename(source, dest); } + @Override public int rmdir(String path) throws IOException { return kfsAccess.kfs_rmdir(path); } + @Override public int remove(String path) throws IOException { return kfsAccess.kfs_remove(path); } + @Override public long filesize(String path) throws IOException { return kfsAccess.kfs_filesize(path); } + @Override public short getReplication(String path) throws IOException { return kfsAccess.kfs_getReplication(path); } + @Override public short setReplication(String path, short replication) throws IOException { return kfsAccess.kfs_setReplication(path, replication); } + @Override public String[][] getDataLocation(String path, long start, long len) throws IOException { return kfsAccess.kfs_getDataLocation(path, start, len); } + @Override public long getModificationTime(String path) throws IOException { return kfsAccess.kfs_getModificationTime(path); } + @Override public FSDataInputStream open(String path, int bufferSize) throws IOException { return new 
FSDataInputStream(new KFSInputStream(kfsAccess, path, statistics)); } + @Override public FSDataOutputStream create(String path, short replication, int bufferSize, Progressable progress) throws IOException { return new FSDataOutputStream(new KFSOutputStream(kfsAccess, path, replication, false, progress), statistics); } + @Override public FSDataOutputStream append(String path, int bufferSize, Progressable progress) throws IOException { // when opening for append, # of replicas is ignored return new FSDataOutputStream(new KFSOutputStream(kfsAccess, path, (short) 1, true, progress), diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSInputStream.java index 04c937b848..492230f064 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSInputStream.java @@ -53,6 +53,7 @@ public KFSInputStream(KfsAccess kfsAccess, String path, this.fsize = 0; } + @Override public long getPos() throws IOException { if (kfsChannel == null) { throw new IOException("File closed"); @@ -60,6 +61,7 @@ public long getPos() throws IOException { return kfsChannel.tell(); } + @Override public synchronized int available() throws IOException { if (kfsChannel == null) { throw new IOException("File closed"); @@ -67,6 +69,7 @@ public synchronized int available() throws IOException { return (int) (this.fsize - getPos()); } + @Override public synchronized void seek(long targetPos) throws IOException { if (kfsChannel == null) { throw new IOException("File closed"); @@ -74,10 +77,12 @@ public synchronized void seek(long targetPos) throws IOException { kfsChannel.seek(targetPos); } + @Override public synchronized boolean seekToNewSource(long targetPos) throws IOException { return false; } + @Override public synchronized int read() throws IOException { if (kfsChannel == null) { throw new IOException("File closed"); @@ -93,6 +98,7 @@ public synchronized int read() throws IOException { return -1; } + @Override public synchronized int read(byte b[], int off, int len) throws IOException { if (kfsChannel == null) { throw new IOException("File closed"); @@ -109,6 +115,7 @@ public synchronized int read(byte b[], int off, int len) throws IOException { return res; } + @Override public synchronized void close() throws IOException { if (kfsChannel == null) { return; @@ -118,14 +125,17 @@ public synchronized void close() throws IOException { kfsChannel = null; } + @Override public boolean markSupported() { return false; } + @Override public void mark(int readLimit) { // Do nothing } + @Override public void reset() throws IOException { throw new IOException("Mark not supported"); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSOutputStream.java index 59cea357e6..a50f750733 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSOutputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSOutputStream.java @@ -20,15 +20,10 @@ package org.apache.hadoop.fs.kfs; import java.io.*; -import java.net.*; -import java.util.*; import java.nio.ByteBuffer; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; 
-import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.util.Progressable; import org.kosmix.kosmosfs.access.KfsAccess; @@ -60,6 +55,7 @@ public long getPos() throws IOException { return kfsChannel.tell(); } + @Override public void write(int v) throws IOException { if (kfsChannel == null) { throw new IOException("File closed"); @@ -70,6 +66,7 @@ public void write(int v) throws IOException { write(b, 0, 1); } + @Override public void write(byte b[], int off, int len) throws IOException { if (kfsChannel == null) { throw new IOException("File closed"); @@ -80,6 +77,7 @@ public void write(byte b[], int off, int len) throws IOException { kfsChannel.write(ByteBuffer.wrap(b, off, len)); } + @Override public void flush() throws IOException { if (kfsChannel == null) { throw new IOException("File closed"); @@ -89,6 +87,7 @@ public void flush() throws IOException { kfsChannel.sync(); } + @Override public synchronized void close() throws IOException { if (kfsChannel == null) { return; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java index af3d5148d5..972a410b53 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java @@ -40,6 +40,7 @@ public class FsPermission implements Writable { private static final Log LOG = LogFactory.getLog(FsPermission.class); static final WritableFactory FACTORY = new WritableFactory() { + @Override public Writable newInstance() { return new FsPermission(); } }; static { // register a ctor @@ -124,12 +125,12 @@ public void fromShort(short n) { set(v[(n >>> 6) & 7], v[(n >>> 3) & 7], v[n & 7], (((n >>> 9) & 1) == 1) ); } - /** {@inheritDoc} */ + @Override public void write(DataOutput out) throws IOException { out.writeShort(toShort()); } - /** {@inheritDoc} */ + @Override public void readFields(DataInput in) throws IOException { fromShort(in.readShort()); } @@ -155,7 +156,7 @@ public short toShort() { return (short)s; } - /** {@inheritDoc} */ + @Override public boolean equals(Object obj) { if (obj instanceof FsPermission) { FsPermission that = (FsPermission)obj; @@ -167,10 +168,10 @@ public boolean equals(Object obj) { return false; } - /** {@inheritDoc} */ + @Override public int hashCode() {return toShort();} - /** {@inheritDoc} */ + @Override public String toString() { String str = useraction.SYMBOL + groupaction.SYMBOL + otheraction.SYMBOL; if(stickyBit) { @@ -300,9 +301,11 @@ private static class ImmutableFsPermission extends FsPermission { public ImmutableFsPermission(short permission) { super(permission); } + @Override public FsPermission applyUMask(FsPermission umask) { throw new UnsupportedOperationException(); } + @Override public void readFields(DataInput in) throws IOException { throw new UnsupportedOperationException(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java index f47226f1e2..bc9e392a87 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java @@ -32,6 +32,7 @@ @InterfaceStability.Unstable public class PermissionStatus implements Writable { static final WritableFactory FACTORY = new WritableFactory() { + @Override public Writable newInstance() { return new PermissionStatus(); } }; static { // register a ctor @@ -42,9 +43,11 @@ public class PermissionStatus implements Writable { public static PermissionStatus createImmutable( String user, String group, FsPermission permission) { return new PermissionStatus(user, group, permission) { + @Override public PermissionStatus applyUMask(FsPermission umask) { throw new UnsupportedOperationException(); } + @Override public void readFields(DataInput in) throws IOException { throw new UnsupportedOperationException(); } @@ -82,14 +85,14 @@ public PermissionStatus applyUMask(FsPermission umask) { return this; } - /** {@inheritDoc} */ + @Override public void readFields(DataInput in) throws IOException { username = Text.readString(in, Text.DEFAULT_MAX_LEN); groupname = Text.readString(in, Text.DEFAULT_MAX_LEN); permission = FsPermission.read(in); } - /** {@inheritDoc} */ + @Override public void write(DataOutput out) throws IOException { write(out, username, groupname, permission); } @@ -115,7 +118,7 @@ public static void write(DataOutput out, permission.write(out); } - /** {@inheritDoc} */ + @Override public String toString() { return username + ":" + groupname + ":" + permission; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java index 6667d62189..4adc306633 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java @@ -83,6 +83,7 @@ class Jets3tFileSystemStore implements FileSystemStore { private static final Log LOG = LogFactory.getLog(Jets3tFileSystemStore.class.getName()); + @Override public void initialize(URI uri, Configuration conf) throws IOException { this.conf = conf; @@ -108,6 +109,7 @@ public void initialize(URI uri, Configuration conf) throws IOException { ); } + @Override public String getVersion() throws IOException { return FILE_SYSTEM_VERSION_VALUE; } @@ -123,14 +125,17 @@ private void delete(String key) throws IOException { } } + @Override public void deleteINode(Path path) throws IOException { delete(pathToKey(path)); } + @Override public void deleteBlock(Block block) throws IOException { delete(blockToKey(block)); } + @Override public boolean inodeExists(Path path) throws IOException { InputStream in = get(pathToKey(path), true); if (in == null) { @@ -140,6 +145,7 @@ public boolean inodeExists(Path path) throws IOException { return true; } + @Override public boolean blockExists(long blockId) throws IOException { InputStream in = get(blockToKey(blockId), false); if (in == null) { @@ -203,10 +209,12 @@ private void checkMetadata(S3Object object) throws S3FileSystemException, } } + @Override public INode retrieveINode(Path path) throws IOException { return INode.deserialize(get(pathToKey(path), true)); } + @Override public File retrieveBlock(Block block, long byteRangeStart) throws IOException { File fileBlock = null; @@ -249,6 +257,7 @@ private File newBackupFile() throws IOException { return result; } + @Override public Set listSubPaths(Path path) 
throws IOException { try { String prefix = pathToKey(path); @@ -270,6 +279,7 @@ public Set listSubPaths(Path path) throws IOException { } } + @Override public Set listDeepSubPaths(Path path) throws IOException { try { String prefix = pathToKey(path); @@ -311,10 +321,12 @@ private void put(String key, InputStream in, long length, boolean storeMetadata) } } + @Override public void storeINode(Path path, INode inode) throws IOException { put(pathToKey(path), inode.serialize(), inode.getSerializedLength(), true); } + @Override public void storeBlock(Block block, File file) throws IOException { BufferedInputStream in = null; try { @@ -354,6 +366,7 @@ private String blockToKey(Block block) { return blockToKey(block.getId()); } + @Override public void purge() throws IOException { try { S3Object[] objects = s3Service.listObjects(bucket); @@ -368,6 +381,7 @@ public void purge() throws IOException { } } + @Override public void dump() throws IOException { StringBuilder sb = new StringBuilder("S3 Filesystem, "); sb.append(bucket.getName()).append("\n"); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java index f82755781e..416bfb17c4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java @@ -61,6 +61,7 @@ public static void main(String[] args) throws Exception { System.exit(res); } + @Override public int run(String[] args) throws Exception { if (args.length == 0) { @@ -195,6 +196,7 @@ interface Store { class UnversionedStore implements Store { + @Override public Set listAllPaths() throws IOException { try { String prefix = urlEncode(Path.SEPARATOR); @@ -212,6 +214,7 @@ public Set listAllPaths() throws IOException { } } + @Override public void deleteINode(Path path) throws IOException { delete(pathToKey(path)); } @@ -227,6 +230,7 @@ private void delete(String key) throws IOException { } } + @Override public INode retrieveINode(Path path) throws IOException { return INode.deserialize(get(pathToKey(path))); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java index 5a5d628adb..81ef31446e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java @@ -206,6 +206,7 @@ public FileStatus[] listStatus(Path f) throws IOException { } /** This optional operation is not yet supported. 
*/ + @Override public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException { throw new IOException("Not supported"); @@ -298,6 +299,7 @@ private boolean renameRecursive(Path src, Path dst) throws IOException { return true; } + @Override public boolean delete(Path path, boolean recursive) throws IOException { Path absolutePath = makeAbsolute(path); INode inode = store.retrieveINode(absolutePath); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java index c2293ba682..400419c110 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java @@ -49,6 +49,7 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore { private S3Service s3Service; private S3Bucket bucket; + @Override public void initialize(URI uri, Configuration conf) throws IOException { S3Credentials s3Credentials = new S3Credentials(); s3Credentials.initialize(uri, conf); @@ -63,6 +64,7 @@ public void initialize(URI uri, Configuration conf) throws IOException { bucket = new S3Bucket(uri.getHost()); } + @Override public void storeFile(String key, File file, byte[] md5Hash) throws IOException { @@ -90,6 +92,7 @@ public void storeFile(String key, File file, byte[] md5Hash) } } + @Override public void storeEmptyFile(String key) throws IOException { try { S3Object object = new S3Object(key); @@ -102,6 +105,7 @@ public void storeEmptyFile(String key) throws IOException { } } + @Override public FileMetadata retrieveMetadata(String key) throws IOException { try { S3Object object = s3Service.getObjectDetails(bucket, key); @@ -117,6 +121,7 @@ public FileMetadata retrieveMetadata(String key) throws IOException { } } + @Override public InputStream retrieve(String key) throws IOException { try { S3Object object = s3Service.getObject(bucket, key); @@ -127,6 +132,7 @@ public InputStream retrieve(String key) throws IOException { } } + @Override public InputStream retrieve(String key, long byteRangeStart) throws IOException { try { @@ -139,11 +145,13 @@ public InputStream retrieve(String key, long byteRangeStart) } } + @Override public PartialListing list(String prefix, int maxListingLength) throws IOException { return list(prefix, maxListingLength, null, false); } + @Override public PartialListing list(String prefix, int maxListingLength, String priorLastKey, boolean recurse) throws IOException { @@ -175,6 +183,7 @@ private PartialListing list(String prefix, String delimiter, } } + @Override public void delete(String key) throws IOException { try { s3Service.deleteObject(bucket, key); @@ -183,6 +192,7 @@ public void delete(String key) throws IOException { } } + @Override public void copy(String srcKey, String dstKey) throws IOException { try { s3Service.copyObject(bucket.getName(), srcKey, bucket.getName(), @@ -192,6 +202,7 @@ public void copy(String srcKey, String dstKey) throws IOException { } } + @Override public void purge(String prefix) throws IOException { try { S3Object[] objects = s3Service.listObjects(bucket, prefix, null); @@ -203,6 +214,7 @@ public void purge(String prefix) throws IOException { } } + @Override public void dump() throws IOException { StringBuilder sb = new StringBuilder("S3 Native Filesystem, "); 
sb.append(bucket.getName()).append("\n"); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java index eea429a97e..e1aeea94ac 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java @@ -150,6 +150,7 @@ protected IllegalNumberOfArgumentsException(int want, int got) { actual = got; } + @Override public String getMessage() { return "expected " + expected + " but got " + actual; } @@ -165,6 +166,7 @@ public TooManyArgumentsException(int expected, int actual) { super(expected, actual); } + @Override public String getMessage() { return "Too many arguments: " + super.getMessage(); } @@ -180,6 +182,7 @@ public NotEnoughArgumentsException(int expected, int actual) { super(expected, actual); } + @Override public String getMessage() { return "Not enough arguments: " + super.getMessage(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java index 71bfc9510d..bc1d8af951 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java @@ -114,6 +114,7 @@ private boolean moveToTrash(PathData item) throws IOException { static class Rmr extends Rm { public static final String NAME = "rmr"; + @Override protected void processOptions(LinkedList<String> args) throws IOException { args.addFirst("-r"); super.processOptions(args); @@ -136,6 +137,7 @@ static class Rmdir extends FsCommand { private boolean ignoreNonEmpty = false; + @Override protected void processOptions(LinkedList<String> args) throws IOException { CommandFormat cf = new CommandFormat( 1, Integer.MAX_VALUE, "-ignore-fail-on-non-empty"); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java index 5ae0d67c57..8d598012ec 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java @@ -161,6 +161,7 @@ public TextRecordInputStream(FileStatus f) throws IOException { outbuf = new DataOutputBuffer(); } + @Override public int read() throws IOException { int ret; if (null == inbuf || -1 == (ret = inbuf.read())) { @@ -180,6 +181,7 @@ public int read() throws IOException { return ret; } + @Override public void close() throws IOException { r.close(); super.close(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java index 3f397327de..2541be393b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java @@ -73,6 +73,7 @@ public String getCommandName() { // abstract method that normally is invoked by runall() which is // overridden below + @Override protected void run(Path path) throws IOException { throw new RuntimeException("not supposed to get
here"); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java index b53d2820de..04574cf673 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java @@ -380,6 +380,7 @@ private static int findLongestDirPrefix(String cwd, String path, boolean isDir) * as given on the commandline, or the full path * @return String of the path */ + @Override public String toString() { String scheme = uri.getScheme(); // No interpretation of symbols. Just decode % escaped chars. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java index 85426fa4ff..95d0a2d456 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java @@ -102,6 +102,7 @@ public ChRootedFileSystem(final URI uri, Configuration conf) * for this FileSystem * @param conf the configuration */ + @Override public void initialize(final URI name, final Configuration conf) throws IOException { super.initialize(name, conf); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NotInMountpointException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NotInMountpointException.java index f92108cfe7..143ce68ebb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NotInMountpointException.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NotInMountpointException.java @@ -20,10 +20,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashSet; - import org.apache.hadoop.fs.Path; /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index 1c0c8dac4d..6031daf118 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -164,6 +164,7 @@ public String getScheme() { * this FileSystem * @param conf the configuration */ + @Override public void initialize(final URI theUri, final Configuration conf) throws IOException { super.initialize(theUri, conf); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java index 871e3d8a63..e0f62e453b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java @@ -42,7 +42,8 @@ public boolean equals(Object o) { return super.equals(o); } - public int hashCode() { + @Override + public int hashCode() { return 
super.hashCode(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java index a4ed255deb..5287581073 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java @@ -892,6 +892,7 @@ private String createWithRetries(final String path, final byte[] data, final List<ACL> acl, final CreateMode mode) throws InterruptedException, KeeperException { return zkDoWithRetries(new ZKAction<String>() { + @Override public String run() throws KeeperException, InterruptedException { return zkClient.create(path, data, acl, mode); } @@ -901,6 +902,7 @@ public String run() throws KeeperException, InterruptedException { private byte[] getDataWithRetries(final String path, final boolean watch, final Stat stat) throws InterruptedException, KeeperException { return zkDoWithRetries(new ZKAction<byte[]>() { + @Override public byte[] run() throws KeeperException, InterruptedException { return zkClient.getData(path, watch, stat); } @@ -910,6 +912,7 @@ public byte[] run() throws KeeperException, InterruptedException { private Stat setDataWithRetries(final String path, final byte[] data, final int version) throws InterruptedException, KeeperException { return zkDoWithRetries(new ZKAction<Stat>() { + @Override public Stat run() throws KeeperException, InterruptedException { return zkClient.setData(path, data, version); } @@ -919,6 +922,7 @@ public Stat run() throws KeeperException, InterruptedException { private void deleteWithRetries(final String path, final int version) throws KeeperException, InterruptedException { zkDoWithRetries(new ZKAction<Void>() { + @Override public Void run() throws KeeperException, InterruptedException { zkClient.delete(path, version); return null; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java index d4ae0899fb..85912c7c76 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java @@ -56,6 +56,7 @@ public enum HAServiceState { this.name = name; } + @Override public String toString() { return name; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java index 06fb648f42..4898b38726 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java @@ -184,6 +184,7 @@ private FenceMethodWithArg(FenceMethod method, String arg) { this.arg = arg; } + @Override public String toString() { return method.getClass().getCanonicalName() + "(" + arg + ")"; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java index 537fba942d..343693e95c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java +++
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java @@ -274,6 +274,7 @@ private static class LogAdapter implements com.jcraft.jsch.Logger { static final Log LOG = LogFactory.getLog( SshFenceByTcpPort.class.getName() + ".jsch"); + @Override public boolean isEnabled(int level) { switch (level) { case com.jcraft.jsch.Logger.DEBUG: @@ -291,6 +292,7 @@ public boolean isEnabled(int level) { } } + @Override public void log(int level, String message) { switch (level) { case com.jcraft.jsch.Logger.DEBUG: diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java index 7bf3c16e8c..77e9e1601a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java @@ -474,7 +474,7 @@ public void addInternalServlet(String name, String pathSpec, } } - /** {@inheritDoc} */ + @Override public void addFilter(String name, String classname, Map<String, String> parameters) { @@ -494,7 +494,7 @@ public void addFilter(String name, String classname, filterNames.add(name); } - /** {@inheritDoc} */ + @Override public void addGlobalFilter(String name, String classname, Map<String, String> parameters) { final String[] ALL_URLS = { "/*" }; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/AbstractMapWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/AbstractMapWritable.java index bb2f163fe4..6bd9efc689 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/AbstractMapWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/AbstractMapWritable.java @@ -164,16 +164,18 @@ protected AbstractMapWritable() { } /** @return the conf */ + @Override public Configuration getConf() { return conf.get(); } /** @param conf the conf to set */ + @Override public void setConf(Configuration conf) { this.conf.set(conf); } - /** {@inheritDoc} */ + @Override public void write(DataOutput out) throws IOException { // First write out the size of the class table and any classes that are @@ -187,7 +189,7 @@ public void write(DataOutput out) throws IOException { } } - /** {@inheritDoc} */ + @Override public void readFields(DataInput in) throws IOException { // Get the number of "unknown" classes diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayWritable.java index 875d6efdc2..122aa5ca1e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayWritable.java @@ -88,6 +88,7 @@ public Object toArray() { public Writable[] get() { return values; } + @Override public void readFields(DataInput in) throws IOException { values = new Writable[in.readInt()]; // construct values for (int i = 0; i < values.length; i++) { @@ -97,6 +98,7 @@ public void readFields(DataInput in) throws IOException { } } + @Override public void write(DataOutput out) throws IOException { out.writeInt(values.length); // write values for (int i = 0; i < values.length; i++) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BooleanWritable.java
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BooleanWritable.java index 71279b4f6d..0079079a79 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BooleanWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BooleanWritable.java @@ -57,12 +57,14 @@ public boolean get() { /** */ + @Override public void readFields(DataInput in) throws IOException { value = in.readBoolean(); } /** */ + @Override public void write(DataOutput out) throws IOException { out.writeBoolean(value); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteWritable.java index ff926c11c1..ffcdea2c9a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteWritable.java @@ -39,10 +39,12 @@ public ByteWritable() {} /** Return the value of this ByteWritable. */ public byte get() { return value; } + @Override public void readFields(DataInput in) throws IOException { value = in.readByte(); } + @Override public void write(DataOutput out) throws IOException { out.writeByte(value); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java index 012a3bc9d7..7e42a36cb7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java @@ -81,6 +81,7 @@ public byte[] copyBytes() { * if you need the returned array to be precisely the length of the data. * @return The data is only valid between 0 and getLength() - 1. */ + @Override public byte[] getBytes() { return bytes; } @@ -97,6 +98,7 @@ public byte[] get() { /** * Get the current size of the buffer. 
*/ + @Override public int getLength() { return size; } @@ -171,6 +173,7 @@ public void set(byte[] newData, int offset, int length) { } // inherit javadoc + @Override public void readFields(DataInput in) throws IOException { setSize(0); // clear the old data setSize(in.readInt()); @@ -178,6 +181,7 @@ public void readFields(DataInput in) throws IOException { } // inherit javadoc + @Override public void write(DataOutput out) throws IOException { out.writeInt(size); out.write(bytes, 0, size); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/CompressedWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/CompressedWritable.java index ad3164b2d2..6550e1f2fd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/CompressedWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/CompressedWritable.java @@ -45,6 +45,7 @@ public abstract class CompressedWritable implements Writable { public CompressedWritable() {} + @Override public final void readFields(DataInput in) throws IOException { compressed = new byte[in.readInt()]; in.readFully(compressed, 0, compressed.length); @@ -70,6 +71,7 @@ protected void ensureInflated() { protected abstract void readFieldsCompressed(DataInput in) throws IOException; + @Override public final void write(DataOutput out) throws IOException { if (compressed == null) { ByteArrayOutputStream deflated = new ByteArrayOutputStream(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataInputByteBuffer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataInputByteBuffer.java index 469d3ff863..2cd59d75dc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataInputByteBuffer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataInputByteBuffer.java @@ -21,8 +21,6 @@ import java.io.DataInputStream; import java.io.InputStream; import java.nio.ByteBuffer; -import java.util.LinkedList; -import java.util.List; public class DataInputByteBuffer extends DataInputStream { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java index 6cd1f49722..2b8e259464 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java @@ -72,6 +72,7 @@ public DefaultStringifier(Configuration conf, Class c) { } } + @Override public T fromString(String str) throws IOException { try { byte[] bytes = Base64.decodeBase64(str.getBytes("UTF-8")); @@ -83,6 +84,7 @@ public T fromString(String str) throws IOException { } } + @Override public String toString(T obj) throws IOException { outBuf.reset(); serializer.serialize(obj); @@ -91,6 +93,7 @@ public String toString(T obj) throws IOException { return new String(Base64.encodeBase64(buf)); } + @Override public void close() throws IOException { inBuf.close(); outBuf.close(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DoubleWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DoubleWritable.java index a984cd4ef5..5cc326fe3c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DoubleWritable.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DoubleWritable.java @@ -42,10 +42,12 @@ public DoubleWritable(double value) { set(value); } + @Override public void readFields(DataInput in) throws IOException { value = in.readDouble(); } + @Override public void write(DataOutput out) throws IOException { out.writeDouble(value); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java index c1ff1ca3bf..dc430cc29c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java @@ -23,7 +23,6 @@ import java.io.IOException; import java.util.EnumSet; import java.util.Iterator; -import java.util.Collection; import java.util.AbstractCollection; import org.apache.hadoop.classification.InterfaceAudience; @@ -46,8 +45,11 @@ public class EnumSetWritable> extends AbstractCollection EnumSetWritable() { } + @Override public Iterator iterator() { return value.iterator(); } + @Override public int size() { return value.size(); } + @Override public boolean add(E e) { if (value == null) { value = EnumSet.of(e); @@ -109,7 +111,7 @@ public EnumSet get() { return value; } - /** {@inheritDoc} */ + @Override @SuppressWarnings("unchecked") public void readFields(DataInput in) throws IOException { int length = in.readInt(); @@ -127,7 +129,7 @@ else if (length == 0) { } } - /** {@inheritDoc} */ + @Override public void write(DataOutput out) throws IOException { if (this.value == null) { out.writeInt(-1); @@ -152,6 +154,7 @@ public void write(DataOutput out) throws IOException { * Returns true if o is an EnumSetWritable with the same value, * or both are null. */ + @Override public boolean equals(Object o) { if (o == null) { throw new IllegalArgumentException("null argument passed in equal()."); @@ -180,27 +183,25 @@ public Class getElementType() { return elementType; } - /** {@inheritDoc} */ + @Override public int hashCode() { if (value == null) return 0; return (int) value.hashCode(); } - /** {@inheritDoc} */ + @Override public String toString() { if (value == null) return "(null)"; return value.toString(); } - /** {@inheritDoc} */ @Override public Configuration getConf() { return this.conf; } - /** {@inheritDoc} */ @Override public void setConf(Configuration conf) { this.conf = conf; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FloatWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FloatWritable.java index 4ade2c4d62..21e4cc4f5b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FloatWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FloatWritable.java @@ -39,10 +39,12 @@ public FloatWritable() {} /** Return the value of this FloatWritable. 
*/ public float get() { return value; } + @Override public void readFields(DataInput in) throws IOException { value = in.readFloat(); } + @Override public void write(DataOutput out) throws IOException { out.writeFloat(value); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/GenericWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/GenericWritable.java index 8268a5a915..7cfeed7f93 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/GenericWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/GenericWritable.java @@ -114,11 +114,13 @@ public Writable get() { return instance; } + @Override public String toString() { return "GW[" + (instance != null ? ("class=" + instance.getClass().getName() + ",value=" + instance.toString()) : "(null)") + "]"; } + @Override public void readFields(DataInput in) throws IOException { type = in.readByte(); Class clazz = getTypes()[type & 0xff]; @@ -131,6 +133,7 @@ public void readFields(DataInput in) throws IOException { instance.readFields(in); } + @Override public void write(DataOutput out) throws IOException { if (type == NOT_SET || instance == null) throw new IOException("The GenericWritable has NOT been set correctly. type=" @@ -145,10 +148,12 @@ public void write(DataOutput out) throws IOException { */ abstract protected Class[] getTypes(); + @Override public Configuration getConf() { return conf; } + @Override public void setConf(Configuration conf) { this.conf = conf; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java index 819f075812..a3315a869e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java @@ -272,9 +272,11 @@ public static void closeSocket(Socket sock) { * The /dev/null of OutputStreams. */ public static class NullOutputStream extends OutputStream { + @Override public void write(byte[] b, int off, int len) throws IOException { } + @Override public void write(int b) throws IOException { } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IntWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IntWritable.java index 6a44d81db6..f656d028cb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IntWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IntWritable.java @@ -42,10 +42,12 @@ public IntWritable() {} /** Return the value of this IntWritable. */ public int get() { return value; } + @Override public void readFields(DataInput in) throws IOException { value = in.readInt(); } + @Override public void write(DataOutput out) throws IOException { out.writeInt(value); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/LongWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/LongWritable.java index b9d64d904d..6dec4aa618 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/LongWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/LongWritable.java @@ -42,15 +42,18 @@ public LongWritable() {} /** Return the value of this LongWritable. 
*/ public long get() { return value; } + @Override public void readFields(DataInput in) throws IOException { value = in.readLong(); } + @Override public void write(DataOutput out) throws IOException { out.writeLong(value); } /** Returns true iff o is a LongWritable with the same value. */ + @Override public boolean equals(Object o) { if (!(o instanceof LongWritable)) return false; @@ -58,17 +61,20 @@ public boolean equals(Object o) { return this.value == other.value; } + @Override public int hashCode() { return (int)value; } /** Compares two LongWritables. */ + @Override public int compareTo(LongWritable o) { long thisValue = this.value; long thatValue = o.value; return (thisValue { public static final int MD5_LEN = 16; private static ThreadLocal DIGESTER_FACTORY = new ThreadLocal() { + @Override protected MessageDigest initialValue() { try { return MessageDigest.getInstance("MD5"); @@ -65,6 +66,7 @@ public MD5Hash(byte[] digest) { } // javadoc from Writable + @Override public void readFields(DataInput in) throws IOException { in.readFully(digest); } @@ -77,6 +79,7 @@ public static MD5Hash read(DataInput in) throws IOException { } // javadoc from Writable + @Override public void write(DataOutput out) throws IOException { out.write(digest); } @@ -155,6 +158,7 @@ public int quarterDigest() { /** Returns true iff o is an MD5Hash whose digest contains the * same values. */ + @Override public boolean equals(Object o) { if (!(o instanceof MD5Hash)) return false; @@ -165,12 +169,14 @@ public boolean equals(Object o) { /** Returns a hash code value for this object. * Only uses the first 4 bytes, since md5s are evenly distributed. */ + @Override public int hashCode() { return quarterDigest(); } /** Compares this object with the specified object for order.*/ + @Override public int compareTo(MD5Hash that) { return WritableComparator.compareBytes(this.digest, 0, MD5_LEN, that.digest, 0, MD5_LEN); @@ -182,6 +188,7 @@ public Comparator() { super(MD5Hash.class); } + @Override public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { return compareBytes(b1, s1, MD5_LEN, b2, s2, MD5_LEN); @@ -196,6 +203,7 @@ public int compare(byte[] b1, int s1, int l1, {'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'}; /** Returns a string representation of this object. */ + @Override public String toString() { StringBuilder buf = new StringBuilder(MD5_LEN*2); for (int i = 0; i < MD5_LEN; i++) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java index 9c14402d75..7e7d855f82 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java @@ -296,6 +296,7 @@ public static void setIndexInterval(Configuration conf, int interval) { } /** Close the map. */ + @Override public synchronized void close() throws IOException { data.close(); index.close(); @@ -723,6 +724,7 @@ public synchronized WritableComparable getClosest(WritableComparable key, } /** Close the map. 
*/ + @Override public synchronized void close() throws IOException { if (!indexClosed) { index.close(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapWritable.java index 377c9c1656..72c7098d7a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapWritable.java @@ -55,27 +55,27 @@ public MapWritable(MapWritable other) { copy(other); } - /** {@inheritDoc} */ + @Override public void clear() { instance.clear(); } - /** {@inheritDoc} */ + @Override public boolean containsKey(Object key) { return instance.containsKey(key); } - /** {@inheritDoc} */ + @Override public boolean containsValue(Object value) { return instance.containsValue(value); } - /** {@inheritDoc} */ + @Override public Set<Map.Entry<Writable, Writable>> entrySet() { return instance.entrySet(); } - /** {@inheritDoc} */ + @Override public boolean equals(Object obj) { if (this == obj) { return true; @@ -93,27 +93,27 @@ public boolean equals(Object obj) { return false; } - /** {@inheritDoc} */ + @Override public Writable get(Object key) { return instance.get(key); } - /** {@inheritDoc} */ + @Override public int hashCode() { return 1 + this.instance.hashCode(); } - /** {@inheritDoc} */ + @Override public boolean isEmpty() { return instance.isEmpty(); } - /** {@inheritDoc} */ + @Override public Set<Writable> keySet() { return instance.keySet(); } - /** {@inheritDoc} */ + @Override @SuppressWarnings("unchecked") public Writable put(Writable key, Writable value) { addToMap(key.getClass()); @@ -121,31 +121,30 @@ public Writable put(Writable key, Writable value) { return instance.put(key, value); } - /** {@inheritDoc} */ + @Override public void putAll(Map<? extends Writable, ? extends Writable> t) { for (Map.Entry<? extends Writable, ? extends Writable> e: t.entrySet()) { put(e.getKey(), e.getValue()); } } - /** {@inheritDoc} */ + @Override public Writable remove(Object key) { return instance.remove(key); } - /** {@inheritDoc} */ + @Override public int size() { return instance.size(); } - /** {@inheritDoc} */ + @Override public Collection<Writable> values() { return instance.values(); } // Writable - /** {@inheritDoc} */ @Override public void write(DataOutput out) throws IOException { super.write(out); @@ -164,7 +163,6 @@ public void write(DataOutput out) throws IOException { } } - /** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override public void readFields(DataInput in) throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/NullWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/NullWritable.java index beb7b17ce7..77c590fdb6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/NullWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/NullWritable.java @@ -35,6 +35,7 @@ private NullWritable() {} // no public ctor /** Returns the single instance of this class. */ public static NullWritable get() { return THIS; } + @Override public String toString() { return "(null)"; } @@ -46,8 +47,11 @@ public String toString() { public int compareTo(NullWritable other) { return 0; } + @Override public boolean equals(Object other) { return other instanceof NullWritable; } + @Override public void readFields(DataInput in) throws IOException {} + @Override public void write(DataOutput out) throws IOException {} /** A Comparator "optimized" for NullWritable.
*/ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java index c555111097..0f0f5c7405 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java @@ -66,15 +66,18 @@ public void set(Object instance) { this.instance = instance; } + @Override public String toString() { return "OW[class=" + declaredClass + ",value=" + instance + "]"; } + @Override public void readFields(DataInput in) throws IOException { readObject(in, this, this.conf); } + @Override public void write(DataOutput out) throws IOException { writeObject(out, instance, declaredClass, conf); } @@ -99,6 +102,7 @@ public NullInstance(Class declaredClass, Configuration conf) { super(conf); this.declaredClass = declaredClass; } + @Override public void readFields(DataInput in) throws IOException { String className = UTF8.readString(in); declaredClass = PRIMITIVE_NAMES.get(className); @@ -110,6 +114,7 @@ public void readFields(DataInput in) throws IOException { } } } + @Override public void write(DataOutput out) throws IOException { UTF8.writeString(out, declaredClass.getName()); } @@ -375,10 +380,12 @@ public static Class loadClass(Configuration conf, String className) { return declaredClass; } + @Override public void setConf(Configuration conf) { this.conf = conf; } + @Override public Configuration getConf() { return this.conf; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/OutputBuffer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/OutputBuffer.java index b7605db9a9..15a396dc2b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/OutputBuffer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/OutputBuffer.java @@ -50,6 +50,7 @@ public class OutputBuffer extends FilterOutputStream { private static class Buffer extends ByteArrayOutputStream { public byte[] getData() { return buf; } public int getLength() { return count; } + @Override public void reset() { count = 0; } public void write(InputStream in, int len) throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java index 046d9e4b73..f1545b69c9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java @@ -194,6 +194,7 @@ private ReadaheadRequestImpl(String identifier, FileDescriptor fd, long off, lon this.len = len; } + @Override public void run() { if (canceled) return; // There's a very narrow race here that the file will close right at diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java index 6bc798e7e3..b30c4a4da4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java @@ -24,7 +24,6 @@ import java.io.IOException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileStatus; 
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java index 293fdbbb93..8a14860773 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java @@ -625,15 +625,18 @@ private void reset(DataInputStream in, int length) throws IOException { dataSize = length; } + @Override public int getSize() { return dataSize; } + @Override public void writeUncompressedBytes(DataOutputStream outStream) throws IOException { outStream.write(data, 0, dataSize); } + @Override public void writeCompressedBytes(DataOutputStream outStream) throws IllegalArgumentException, IOException { throw @@ -666,10 +669,12 @@ private void reset(DataInputStream in, int length) throws IOException { dataSize = length; } + @Override public int getSize() { return dataSize; } + @Override public void writeUncompressedBytes(DataOutputStream outStream) throws IOException { if (decompressedStream == null) { @@ -687,6 +692,7 @@ public void writeUncompressedBytes(DataOutputStream outStream) } } + @Override public void writeCompressedBytes(DataOutputStream outStream) throws IllegalArgumentException, IOException { outStream.write(data, 0, dataSize); @@ -728,6 +734,7 @@ public TreeMap getMetadata() { return new TreeMap(this.theMetadata); } + @Override public void write(DataOutput out) throws IOException { out.writeInt(this.theMetadata.size()); Iterator> iter = @@ -739,6 +746,7 @@ public void write(DataOutput out) throws IOException { } } + @Override public void readFields(DataInput in) throws IOException { int sz = in.readInt(); if (sz < 0) throw new IOException("Invalid size: " + sz + " for file metadata object"); @@ -752,6 +760,7 @@ public void readFields(DataInput in) throws IOException { } } + @Override public boolean equals(Object other) { if (other == null) { return false; @@ -788,11 +797,13 @@ public boolean equals(Metadata other) { return true; } + @Override public int hashCode() { assert false : "hashCode not designed"; return 42; // any arbitrary constant will do } + @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("size: ").append(this.theMetadata.size()).append("\n"); @@ -1250,6 +1261,7 @@ public void hflush() throws IOException { Configuration getConf() { return conf; } /** Close the file. */ + @Override public synchronized void close() throws IOException { keySerializer.close(); uncompressedValSerializer.close(); @@ -1360,6 +1372,7 @@ static class RecordCompressWriter extends Writer { } /** Append a key/value pair. */ + @Override @SuppressWarnings("unchecked") public synchronized void append(Object key, Object val) throws IOException { @@ -1392,6 +1405,7 @@ public synchronized void append(Object key, Object val) } /** Append a key/value pair. */ + @Override public synchronized void appendRaw(byte[] keyData, int keyOffset, int keyLength, ValueBytes val) throws IOException { @@ -1449,6 +1463,7 @@ void writeBuffer(DataOutputBuffer uncompressedDataBuffer) } /** Compress and flush contents to dfs */ + @Override public synchronized void sync() throws IOException { if (noBufferedRecords > 0) { super.sync(); @@ -1478,6 +1493,7 @@ public synchronized void sync() throws IOException { } /** Close the file. 
*/ + @Override public synchronized void close() throws IOException { if (out != null) { sync(); @@ -1486,6 +1502,7 @@ public synchronized void close() throws IOException { } /** Append a key/value pair. */ + @Override @SuppressWarnings("unchecked") public synchronized void append(Object key, Object val) throws IOException { @@ -1518,6 +1535,7 @@ public synchronized void append(Object key, Object val) } /** Append a key/value pair. */ + @Override public synchronized void appendRaw(byte[] keyData, int keyOffset, int keyLength, ValueBytes val) throws IOException { @@ -1960,6 +1978,7 @@ private Deserializer getDeserializer(SerializationFactory sf, Class c) { } /** Close the file. */ + @Override public synchronized void close() throws IOException { // Return the decompressors to the pool CodecPool.returnDecompressor(keyLenDecompressor); @@ -2618,6 +2637,7 @@ public synchronized long getPosition() throws IOException { } /** Returns the name of the file. */ + @Override public String toString() { return filename; } @@ -2948,6 +2968,7 @@ private void sort(int count) { mergeSort.mergeSort(pointersCopy, pointers, 0, count); } class SeqFileComparator implements Comparator { + @Override public int compare(IntWritable I, IntWritable J) { return comparator.compare(rawBuffer, keyOffsets[I.get()], keyLengths[I.get()], rawBuffer, @@ -3221,6 +3242,7 @@ public MergeQueue(List segments, this.tmpDir = tmpDir; this.progress = progress; } + @Override protected boolean lessThan(Object a, Object b) { // indicate we're making progress if (progress != null) { @@ -3232,6 +3254,7 @@ protected boolean lessThan(Object a, Object b) { msa.getKey().getLength(), msb.getKey().getData(), 0, msb.getKey().getLength()) < 0; } + @Override public void close() throws IOException { SegmentDescriptor ms; // close inputs while ((ms = (SegmentDescriptor)pop()) != null) { @@ -3239,12 +3262,15 @@ public void close() throws IOException { } minSegment = null; } + @Override public DataOutputBuffer getKey() throws IOException { return rawKey; } + @Override public ValueBytes getValue() throws IOException { return rawValue; } + @Override public boolean next() throws IOException { if (size() == 0) return false; @@ -3272,6 +3298,7 @@ public boolean next() throws IOException { return true; } + @Override public Progress getProgress() { return mergeProgress; } @@ -3469,6 +3496,7 @@ public boolean shouldPreserveInput() { return preserveInput; } + @Override public int compareTo(Object o) { SegmentDescriptor that = (SegmentDescriptor)o; if (this.segmentLength != that.segmentLength) { @@ -3481,6 +3509,7 @@ public int compareTo(Object o) { compareTo(that.segmentPathName.toString()); } + @Override public boolean equals(Object o) { if (!(o instanceof SegmentDescriptor)) { return false; @@ -3495,6 +3524,7 @@ public boolean equals(Object o) { return false; } + @Override public int hashCode() { return 37 * 17 + (int) (segmentOffset^(segmentOffset>>>32)); } @@ -3584,12 +3614,14 @@ public LinkedSegmentsDescriptor (long segmentOffset, long segmentLength, /** The default cleanup. 
Subclasses can override this with a custom * cleanup */ + @Override public void cleanup() throws IOException { super.close(); if (super.shouldPreserveInput()) return; parentContainer.cleanup(); } + @Override public boolean equals(Object o) { if (!(o instanceof LinkedSegmentsDescriptor)) { return false; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SetFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SetFile.java index 9ba0023190..068ca9d40e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SetFile.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SetFile.java @@ -87,6 +87,7 @@ public Reader(FileSystem fs, String dirName, WritableComparator comparator, Conf } // javadoc inherited + @Override public boolean seek(WritableComparable key) throws IOException { return super.seek(key); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SortedMapWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SortedMapWritable.java index d870a5fd84..eee744ec6a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SortedMapWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SortedMapWritable.java @@ -57,86 +57,86 @@ public SortedMapWritable(SortedMapWritable other) { copy(other); } - /** {@inheritDoc} */ + @Override public Comparator comparator() { // Returning null means we use the natural ordering of the keys return null; } - /** {@inheritDoc} */ + @Override public WritableComparable firstKey() { return instance.firstKey(); } - /** {@inheritDoc} */ + @Override public SortedMap headMap(WritableComparable toKey) { return instance.headMap(toKey); } - /** {@inheritDoc} */ + @Override public WritableComparable lastKey() { return instance.lastKey(); } - /** {@inheritDoc} */ + @Override public SortedMap subMap(WritableComparable fromKey, WritableComparable toKey) { return instance.subMap(fromKey, toKey); } - /** {@inheritDoc} */ + @Override public SortedMap tailMap(WritableComparable fromKey) { return instance.tailMap(fromKey); } - /** {@inheritDoc} */ + @Override public void clear() { instance.clear(); } - /** {@inheritDoc} */ + @Override public boolean containsKey(Object key) { return instance.containsKey(key); } - /** {@inheritDoc} */ + @Override public boolean containsValue(Object value) { return instance.containsValue(value); } - /** {@inheritDoc} */ + @Override public Set> entrySet() { return instance.entrySet(); } - /** {@inheritDoc} */ + @Override public Writable get(Object key) { return instance.get(key); } - /** {@inheritDoc} */ + @Override public boolean isEmpty() { return instance.isEmpty(); } - /** {@inheritDoc} */ + @Override public Set keySet() { return instance.keySet(); } - /** {@inheritDoc} */ + @Override public Writable put(WritableComparable key, Writable value) { addToMap(key.getClass()); addToMap(value.getClass()); return instance.put(key, value); } - /** {@inheritDoc} */ + @Override public void putAll(Map t) { for (Map.Entry e: t.entrySet()) { @@ -145,22 +145,21 @@ public void putAll(Map t) { } } - /** {@inheritDoc} */ + @Override public Writable remove(Object key) { return instance.remove(key); } - /** {@inheritDoc} */ + @Override public int size() { return instance.size(); } - /** {@inheritDoc} */ + @Override public Collection values() { return instance.values(); } - /** {@inheritDoc} */ @SuppressWarnings("unchecked") 
@Override public void readFields(DataInput in) throws IOException { @@ -187,7 +186,6 @@ public void readFields(DataInput in) throws IOException { } } - /** {@inheritDoc} */ @Override public void write(DataOutput out) throws IOException { super.write(out); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Stringifier.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Stringifier.java index a7ee6876d4..949b14ae57 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Stringifier.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Stringifier.java @@ -54,6 +54,7 @@ public interface Stringifier extends java.io.Closeable { * Closes this object. * @throws IOException if an I/O error occurs * */ + @Override public void close() throws IOException; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java index a4f80ea886..95fb174a9d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java @@ -55,6 +55,7 @@ public class Text extends BinaryComparable private static ThreadLocal ENCODER_FACTORY = new ThreadLocal() { + @Override protected CharsetEncoder initialValue() { return Charset.forName("UTF-8").newEncoder(). onMalformedInput(CodingErrorAction.REPORT). @@ -64,6 +65,7 @@ protected CharsetEncoder initialValue() { private static ThreadLocal DECODER_FACTORY = new ThreadLocal() { + @Override protected CharsetDecoder initialValue() { return Charset.forName("UTF-8").newDecoder(). onMalformedInput(CodingErrorAction.REPORT). @@ -112,11 +114,13 @@ public byte[] copyBytes() { * valid. Please use {@link #copyBytes()} if you * need the returned array to be precisely the length of the data. */ + @Override public byte[] getBytes() { return bytes; } /** Returns the number of bytes in the byte array */ + @Override public int getLength() { return length; } @@ -281,6 +285,7 @@ public String toString() { /** deserialize */ + @Override public void readFields(DataInput in) throws IOException { int newLength = WritableUtils.readVInt(in); setCapacity(newLength, false); @@ -313,6 +318,7 @@ public static void skip(DataInput in) throws IOException { * length uses zero-compressed encoding * @see Writable#write(DataOutput) */ + @Override public void write(DataOutput out) throws IOException { WritableUtils.writeVInt(out, length); out.write(bytes, 0, length); @@ -329,6 +335,7 @@ public void write(DataOutput out, int maxLength) throws IOException { } /** Returns true iff o is a Text with the same contents. 
*/ + @Override public boolean equals(Object o) { if (o instanceof Text) return super.equals(o); @@ -346,6 +353,7 @@ public Comparator() { super(Text.class); } + @Override public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { int n1 = WritableUtils.decodeVIntSize(b1[s1]); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/TwoDArrayWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/TwoDArrayWritable.java index 76304623ee..cf8947d32d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/TwoDArrayWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/TwoDArrayWritable.java @@ -57,6 +57,7 @@ public Object toArray() { public Writable[][] get() { return values; } + @Override public void readFields(DataInput in) throws IOException { // construct matrix values = new Writable[in.readInt()][]; @@ -81,6 +82,7 @@ public void readFields(DataInput in) throws IOException { } } + @Override public void write(DataOutput out) throws IOException { out.writeInt(values.length); // write values for (int i = 0; i < values.length; i++) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java index 6a0f88673f..ef7512996c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java @@ -110,6 +110,7 @@ public void set(UTF8 other) { System.arraycopy(other.bytes, 0, bytes, 0, length); } + @Override public void readFields(DataInput in) throws IOException { length = in.readUnsignedShort(); if (bytes == null || bytes.length < length) @@ -123,6 +124,7 @@ public static void skip(DataInput in) throws IOException { WritableUtils.skipFully(in, length); } + @Override public void write(DataOutput out) throws IOException { out.writeShort(length); out.write(bytes, 0, length); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VIntWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VIntWritable.java index e37b144dbf..f537524c4b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VIntWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VIntWritable.java @@ -43,10 +43,12 @@ public VIntWritable() {} /** Return the value of this VIntWritable. */ public int get() { return value; } + @Override public void readFields(DataInput in) throws IOException { value = WritableUtils.readVInt(in); } + @Override public void write(DataOutput out) throws IOException { WritableUtils.writeVInt(out, value); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VLongWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VLongWritable.java index 869bf43914..a9fac30605 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VLongWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VLongWritable.java @@ -43,10 +43,12 @@ public VLongWritable() {} /** Return the value of this LongWritable. 
*/ public long get() { return value; } + @Override public void readFields(DataInput in) throws IOException { value = WritableUtils.readVLong(in); } + @Override public void write(DataOutput out) throws IOException { WritableUtils.writeVLong(out, value); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionMismatchException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionMismatchException.java index 162374be21..a72be58832 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionMismatchException.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionMismatchException.java @@ -39,6 +39,7 @@ public VersionMismatchException(byte expectedVersionIn, byte foundVersionIn){ } /** Returns a string representation of this object. */ + @Override public String toString(){ return "A record version mismatch occured. Expecting v" + expectedVersion + ", found v" + foundVersion; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionedWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionedWritable.java index a197fd2e4f..c2db55520c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionedWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionedWritable.java @@ -40,11 +40,13 @@ public abstract class VersionedWritable implements Writable { public abstract byte getVersion(); // javadoc from Writable + @Override public void write(DataOutput out) throws IOException { out.writeByte(getVersion()); // store version } // javadoc from Writable + @Override public void readFields(DataInput in) throws IOException { byte version = in.readByte(); // read version if (version != getVersion()) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java index 6eb3a21443..eb3c8d322c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java @@ -120,6 +120,7 @@ public WritableComparable newKey() { * Writable#readFields(DataInput)}, then calls {@link * #compare(WritableComparable,WritableComparable)}. 
*/ + @Override public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { try { buffer.reset(b1, s1, l1); // parse key1 @@ -144,6 +145,7 @@ public int compare(WritableComparable a, WritableComparable b) { return a.compareTo(b); } + @Override public int compare(Object a, Object b) { return compare((WritableComparable)a, (WritableComparable)b); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java index a7a925f35a..35f7cb43ea 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java @@ -63,6 +63,7 @@ public BZip2Codec() { } * @throws java.io.IOException * Throws IO exception */ + @Override public CompressionOutputStream createOutputStream(OutputStream out) throws IOException { return new BZip2CompressionOutputStream(out); @@ -74,6 +75,7 @@ public CompressionOutputStream createOutputStream(OutputStream out) * @return CompressionOutputStream @throws java.io.IOException */ + @Override public CompressionOutputStream createOutputStream(OutputStream out, Compressor compressor) throws IOException { return createOutputStream(out); @@ -84,6 +86,7 @@ public CompressionOutputStream createOutputStream(OutputStream out, * * @return BZip2DummyCompressor.class */ + @Override public Class getCompressorType() { return BZip2DummyCompressor.class; } @@ -93,6 +96,7 @@ public Class getCompressorTy * * @return Compressor */ + @Override public Compressor createCompressor() { return new BZip2DummyCompressor(); } @@ -106,6 +110,7 @@ public Compressor createCompressor() { * @throws java.io.IOException * Throws IOException */ + @Override public CompressionInputStream createInputStream(InputStream in) throws IOException { return new BZip2CompressionInputStream(in); @@ -116,6 +121,7 @@ public CompressionInputStream createInputStream(InputStream in) * * @return CompressionInputStream */ + @Override public CompressionInputStream createInputStream(InputStream in, Decompressor decompressor) throws IOException { return createInputStream(in); @@ -133,6 +139,7 @@ public CompressionInputStream createInputStream(InputStream in, * * @return CompressionInputStream for BZip2 aligned at block boundaries */ + @Override public SplitCompressionInputStream createInputStream(InputStream seekableIn, Decompressor decompressor, long start, long end, READ_MODE readMode) throws IOException { @@ -181,6 +188,7 @@ public SplitCompressionInputStream createInputStream(InputStream seekableIn, * * @return BZip2DummyDecompressor.class */ + @Override public Class getDecompressorType() { return BZip2DummyDecompressor.class; } @@ -190,6 +198,7 @@ public Class getDecompress * * @return Decompressor */ + @Override public Decompressor createDecompressor() { return new BZip2DummyDecompressor(); } @@ -199,6 +208,7 @@ public Decompressor createDecompressor() { * * @return A String telling the default bzip2 file extension */ + @Override public String getDefaultExtension() { return ".bz2"; } @@ -226,6 +236,7 @@ private void writeStreamHeader() throws IOException { } } + @Override public void finish() throws IOException { if (needsReset) { // In the case that nothing is written to this stream, we still need to @@ -245,12 +256,14 @@ private void internalReset() throws IOException { } } + @Override public void resetState() throws IOException { // 
Cannot write to out at this point because out might not be ready // yet, as in SequenceFile.Writer implementation. needsReset = true; } + @Override public void write(int b) throws IOException { if (needsReset) { internalReset(); @@ -258,6 +271,7 @@ public void write(int b) throws IOException { this.output.write(b); } + @Override public void write(byte[] b, int off, int len) throws IOException { if (needsReset) { internalReset(); @@ -265,6 +279,7 @@ public void write(byte[] b, int off, int len) throws IOException { this.output.write(b, off, len); } + @Override public void close() throws IOException { if (needsReset) { // In the case that nothing is written to this stream, we still need to @@ -382,6 +397,7 @@ private BufferedInputStream readStreamHeader() throws IOException { }// end of method + @Override public void close() throws IOException { if (!needsReset) { input.close(); @@ -417,6 +433,7 @@ public void close() throws IOException { * */ + @Override public int read(byte[] b, int off, int len) throws IOException { if (needsReset) { internalReset(); @@ -440,6 +457,7 @@ public int read(byte[] b, int off, int len) throws IOException { } + @Override public int read() throws IOException { byte b[] = new byte[1]; int result = this.read(b, 0, 1); @@ -454,6 +472,7 @@ private void internalReset() throws IOException { } } + @Override public void resetState() throws IOException { // Cannot read from bufferedIn at this point because bufferedIn // might not be ready @@ -461,6 +480,7 @@ public void resetState() throws IOException { needsReset = true; } + @Override public long getPos() { return this.compressedStreamPosition; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockCompressorStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockCompressorStream.java index 5d854861f2..434183bbc2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockCompressorStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockCompressorStream.java @@ -78,6 +78,7 @@ public BlockCompressorStream(OutputStream out, Compressor compressor) { * Each block contains the uncompressed length for the block, followed by * one or more length-prefixed blocks of compressed data. 
*/ + @Override public void write(byte[] b, int off, int len) throws IOException { // Sanity checks if (compressor.finished()) { @@ -132,6 +133,7 @@ public void write(byte[] b, int off, int len) throws IOException { } } + @Override public void finish() throws IOException { if (!compressor.finished()) { rawWriteInt((int)compressor.getBytesRead()); @@ -142,6 +144,7 @@ public void finish() throws IOException { } } + @Override protected void compress() throws IOException { int len = compressor.compress(buffer, 0, buffer.length); if (len > 0) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java index 42ade89019..7d2504e3e2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java @@ -65,6 +65,7 @@ protected BlockDecompressorStream(InputStream in) throws IOException { super(in); } + @Override protected int decompress(byte[] b, int off, int len) throws IOException { // Check if we are the beginning of a block if (noUncompressedBytes == originalBlockSize) { @@ -104,6 +105,7 @@ protected int decompress(byte[] b, int off, int len) throws IOException { return n; } + @Override protected int getCompressedData() throws IOException { checkStream(); @@ -126,6 +128,7 @@ protected int getCompressedData() throws IOException { return len; } + @Override public void resetState() throws IOException { originalBlockSize = 0; noUncompressedBytes = 0; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java index dc95e9e999..57fb366bdd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java @@ -75,6 +75,7 @@ private void addCodec(CompressionCodec codec) { /** * Print the extension map out as a string. */ + @Override public String toString() { StringBuilder buf = new StringBuilder(); Iterator> itr = diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java index 4f7757dfed..4491819d72 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java @@ -55,6 +55,7 @@ protected CompressionInputStream(InputStream in) throws IOException { this.in = in; } + @Override public void close() throws IOException { in.close(); } @@ -63,6 +64,7 @@ public void close() throws IOException { * Read bytes from the stream. * Made abstract to prevent leakage to underlying stream. 
*/ + @Override public abstract int read(byte[] b, int off, int len) throws IOException; /** @@ -76,6 +78,7 @@ public void close() throws IOException { * * @return Current position in stream as a long */ + @Override public long getPos() throws IOException { if (!(in instanceof Seekable) || !(in instanceof PositionedReadable)){ //This way of getting the current position will not work for file @@ -95,6 +98,7 @@ public long getPos() throws IOException { * @throws UnsupportedOperationException */ + @Override public void seek(long pos) throws UnsupportedOperationException { throw new UnsupportedOperationException(); } @@ -104,6 +108,7 @@ public void seek(long pos) throws UnsupportedOperationException { * * @throws UnsupportedOperationException */ + @Override public boolean seekToNewSource(long targetPos) throws UnsupportedOperationException { throw new UnsupportedOperationException(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java index b4a47946b2..9bd6b84f98 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java @@ -44,11 +44,13 @@ protected CompressionOutputStream(OutputStream out) { this.out = out; } + @Override public void close() throws IOException { finish(); out.close(); } + @Override public void flush() throws IOException { out.flush(); } @@ -57,6 +59,7 @@ public void flush() throws IOException { * Write compressed bytes to the stream. * Made abstract to prevent leakage to underlying stream. 
*/ + @Override public abstract void write(byte[] b, int off, int len) throws IOException; /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressorStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressorStream.java index 4cd7425ba6..84f1b2f179 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressorStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressorStream.java @@ -59,6 +59,7 @@ protected CompressorStream(OutputStream out) { super(out); } + @Override public void write(byte[] b, int off, int len) throws IOException { // Sanity checks if (compressor.finished()) { @@ -83,6 +84,7 @@ protected void compress() throws IOException { } } + @Override public void finish() throws IOException { if (!compressor.finished()) { compressor.finish(); @@ -92,10 +94,12 @@ public void finish() throws IOException { } } + @Override public void resetState() throws IOException { compressor.reset(); } + @Override public void close() throws IOException { if (!closed) { finish(); @@ -105,6 +109,7 @@ public void close() throws IOException { } private byte[] oneByte = new byte[1]; + @Override public void write(int b) throws IOException { oneByte[0] = (byte)(b & 0xff); write(oneByte, 0, oneByte.length); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java index d0ef6ee6d3..16e0ad763a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java @@ -66,11 +66,13 @@ protected DecompressorStream(InputStream in) throws IOException { } private byte[] oneByte = new byte[1]; + @Override public int read() throws IOException { checkStream(); return (read(oneByte, 0, oneByte.length) == -1) ? -1 : (oneByte[0] & 0xff); } + @Override public int read(byte[] b, int off, int len) throws IOException { checkStream(); @@ -163,11 +165,13 @@ protected void checkStream() throws IOException { } } + @Override public void resetState() throws IOException { decompressor.reset(); } private byte[] skipBytes = new byte[512]; + @Override public long skip(long n) throws IOException { // Sanity checks if (n < 0) { @@ -189,11 +193,13 @@ public long skip(long n) throws IOException { return skipped; } + @Override public int available() throws IOException { checkStream(); return (eof) ? 
0 : 1; } + @Override public void close() throws IOException { if (!closed) { in.close(); @@ -201,13 +207,16 @@ public void close() throws IOException { } } + @Override public boolean markSupported() { return false; } + @Override public synchronized void mark(int readlimit) { } + @Override public synchronized void reset() throws IOException { throw new IOException("mark/reset not supported"); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java index 1be28bfce3..ea7df20de3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java @@ -37,14 +37,17 @@ public class DefaultCodec implements Configurable, CompressionCodec { Configuration conf; + @Override public void setConf(Configuration conf) { this.conf = conf; } + @Override public Configuration getConf() { return conf; } + @Override public CompressionOutputStream createOutputStream(OutputStream out) throws IOException { // This may leak memory if called in a loop. The createCompressor() call @@ -57,6 +60,7 @@ public CompressionOutputStream createOutputStream(OutputStream out) conf.getInt("io.file.buffer.size", 4*1024)); } + @Override public CompressionOutputStream createOutputStream(OutputStream out, Compressor compressor) throws IOException { @@ -64,20 +68,24 @@ public CompressionOutputStream createOutputStream(OutputStream out, conf.getInt("io.file.buffer.size", 4*1024)); } + @Override public Class getCompressorType() { return ZlibFactory.getZlibCompressorType(conf); } + @Override public Compressor createCompressor() { return ZlibFactory.getZlibCompressor(conf); } + @Override public CompressionInputStream createInputStream(InputStream in) throws IOException { return new DecompressorStream(in, createDecompressor(), conf.getInt("io.file.buffer.size", 4*1024)); } + @Override public CompressionInputStream createInputStream(InputStream in, Decompressor decompressor) throws IOException { @@ -85,14 +93,17 @@ public CompressionInputStream createInputStream(InputStream in, conf.getInt("io.file.buffer.size", 4*1024)); } + @Override public Class getDecompressorType() { return ZlibFactory.getZlibDecompressorType(conf); } + @Override public Decompressor createDecompressor() { return ZlibFactory.getZlibDecompressor(conf); } + @Override public String getDefaultExtension() { return ".deflate"; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java index b17fe4b39e..520205e166 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java @@ -20,15 +20,11 @@ import java.io.*; import java.util.zip.GZIPOutputStream; -import java.util.zip.GZIPInputStream; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.compress.DefaultCodec; import org.apache.hadoop.io.compress.zlib.*; -import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel; -import 
org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy; /** * This class creates gzip compressors/decompressors. @@ -66,32 +62,39 @@ protected GzipOutputStream(CompressorStream out) { super(out); } + @Override public void close() throws IOException { out.close(); } + @Override public void flush() throws IOException { out.flush(); } + @Override public void write(int b) throws IOException { out.write(b); } + @Override public void write(byte[] data, int offset, int length) throws IOException { out.write(data, offset, length); } + @Override public void finish() throws IOException { ((ResetableGZIPOutputStream) out).finish(); } + @Override public void resetState() throws IOException { ((ResetableGZIPOutputStream) out).resetState(); } } + @Override public CompressionOutputStream createOutputStream(OutputStream out) throws IOException { return (ZlibFactory.isNativeZlibLoaded(conf)) ? @@ -100,6 +103,7 @@ public CompressionOutputStream createOutputStream(OutputStream out) new GzipOutputStream(out); } + @Override public CompressionOutputStream createOutputStream(OutputStream out, Compressor compressor) throws IOException { @@ -110,23 +114,27 @@ public CompressionOutputStream createOutputStream(OutputStream out, createOutputStream(out); } + @Override public Compressor createCompressor() { return (ZlibFactory.isNativeZlibLoaded(conf)) ? new GzipZlibCompressor(conf) : null; } + @Override public Class getCompressorType() { return ZlibFactory.isNativeZlibLoaded(conf) ? GzipZlibCompressor.class : null; } + @Override public CompressionInputStream createInputStream(InputStream in) throws IOException { return createInputStream(in, null); } + @Override public CompressionInputStream createInputStream(InputStream in, Decompressor decompressor) throws IOException { @@ -137,18 +145,21 @@ public CompressionInputStream createInputStream(InputStream in, conf.getInt("io.file.buffer.size", 4*1024)); } + @Override public Decompressor createDecompressor() { return (ZlibFactory.isNativeZlibLoaded(conf)) ? new GzipZlibDecompressor() : new BuiltInGzipDecompressor(); } + @Override public Class getDecompressorType() { return ZlibFactory.isNativeZlibLoaded(conf) ? 
GzipZlibDecompressor.class : BuiltInGzipDecompressor.class; } + @Override public String getDefaultExtension() { return ".gz"; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java index 14cc9d5b82..00e892d845 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java @@ -338,6 +338,7 @@ private void changeStateToProcessABlock() throws IOException { } + @Override public int read() throws IOException { if (this.in != null) { @@ -372,6 +373,7 @@ public int read() throws IOException { */ + @Override public int read(final byte[] dest, final int offs, final int len) throws IOException { if (offs < 0) { @@ -574,6 +576,7 @@ private void complete() throws IOException { } } + @Override public void close() throws IOException { InputStream inShadow = this.in; if (inShadow != null) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java index 3060eb924f..ca4e5cd0df 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java @@ -639,6 +639,7 @@ public CBZip2OutputStream(final OutputStream out, final int blockSize) init(); } + @Override public void write(final int b) throws IOException { if (this.out != null) { write0(b); @@ -704,6 +705,7 @@ private void writeRun() throws IOException { /** * Overriden to close the stream. 
*/ + @Override protected void finalize() throws Throwable { finish(); super.finalize(); @@ -726,6 +728,7 @@ public void finish() throws IOException { } } + @Override public void close() throws IOException { if (out != null) { OutputStream outShadow = this.out; @@ -739,6 +742,7 @@ public void close() throws IOException { } } + @Override public void flush() throws IOException { OutputStream outShadow = this.out; if (outShadow != null) { @@ -849,6 +853,7 @@ public final int getBlockSize() { return this.blockSize100k; } + @Override public void write(final byte[] buf, int offs, final int len) throws IOException { if (offs < 0) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java index 0cf65e5144..22a3118f5f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java @@ -258,6 +258,7 @@ public synchronized int getRemaining() { return 0; } + @Override public synchronized void reset() { finished = false; compressedDirectBufLen = 0; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java index baf864094e..4620092f08 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java @@ -257,6 +257,7 @@ public synchronized int getRemaining() { return 0; } + @Override public synchronized void reset() { finished = false; compressedDirectBufLen = 0; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java index 1e5525e743..41f8036fda 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java @@ -122,7 +122,7 @@ public BuiltInGzipDecompressor() { // in the first buffer load? (But how else would one do it?) } - /** {@inheritDoc} */ + @Override public synchronized boolean needsInput() { if (state == GzipStateLabel.DEFLATE_STREAM) { // most common case return inflater.needsInput(); @@ -144,6 +144,7 @@ public synchronized boolean needsInput() { * the bulk deflate stream, which is a performance hit we don't want * to absorb. (Decompressor now documents this requirement.) */ + @Override public synchronized void setInput(byte[] b, int off, int len) { if (b == null) { throw new NullPointerException(); @@ -175,6 +176,7 @@ public synchronized void setInput(byte[] b, int off, int len) { * methods below), the deflate stream is never copied; Inflater operates * directly on the user's buffer. 
*/ + @Override public synchronized int decompress(byte[] b, int off, int len) throws IOException { int numAvailBytes = 0; @@ -421,16 +423,17 @@ public synchronized long getBytesRead() { * * @return the total (non-negative) number of unprocessed bytes in input */ + @Override public synchronized int getRemaining() { return userBufLen; } - /** {@inheritDoc} */ + @Override public synchronized boolean needsDictionary() { return inflater.needsDictionary(); } - /** {@inheritDoc} */ + @Override public synchronized void setDictionary(byte[] b, int off, int len) { inflater.setDictionary(b, off, len); } @@ -439,6 +442,7 @@ public synchronized void setDictionary(byte[] b, int off, int len) { * Returns true if the end of the gzip substream (single "member") has been * reached.
*/ + @Override public synchronized boolean finished() { return (state == GzipStateLabel.FINISHED); } @@ -447,6 +451,7 @@ public synchronized boolean finished() { * Resets everything, including the input buffer, regardless of whether the * current gzip substream is finished.
*/ + @Override public synchronized void reset() { // could optionally emit INFO message if state != GzipStateLabel.FINISHED inflater.reset(); @@ -463,7 +468,7 @@ public synchronized void reset() { hasHeaderCRC = false; } - /** {@inheritDoc} */ + @Override public synchronized void end() { inflater.end(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java index b269d557b7..509456e834 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java @@ -48,6 +48,7 @@ public BuiltInZlibDeflater() { super(); } + @Override public synchronized int compress(byte[] b, int off, int len) throws IOException { return super.deflate(b, off, len); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibInflater.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibInflater.java index 0223587ad0..4fda6723b8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibInflater.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibInflater.java @@ -39,6 +39,7 @@ public BuiltInZlibInflater() { super(); } + @Override public synchronized int decompress(byte[] b, int off, int len) throws IOException { try { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java index 8839bc98fa..c0d0d699a5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java @@ -259,6 +259,7 @@ public synchronized void reinit(Configuration conf) { } } + @Override public synchronized void setInput(byte[] b, int off, int len) { if (b== null) { throw new NullPointerException(); @@ -287,6 +288,7 @@ synchronized void setInputFromSavedData() { uncompressedDirectBufLen = uncompressedDirectBuf.position(); } + @Override public synchronized void setDictionary(byte[] b, int off, int len) { if (stream == 0 || b == null) { throw new NullPointerException(); @@ -297,6 +299,7 @@ public synchronized void setDictionary(byte[] b, int off, int len) { setDictionary(stream, b, off, len); } + @Override public synchronized boolean needsInput() { // Consume remaining compressed data? 
if (compressedDirectBuf.remaining() > 0) { @@ -325,16 +328,19 @@ public synchronized boolean needsInput() { return false; } + @Override public synchronized void finish() { finish = true; } + @Override public synchronized boolean finished() { // Check if 'zlib' says its 'finished' and // all compressed data has been consumed return (finished && compressedDirectBuf.remaining() == 0); } + @Override public synchronized int compress(byte[] b, int off, int len) throws IOException { if (b == null) { @@ -385,6 +391,7 @@ public synchronized int compress(byte[] b, int off, int len) * * @return the total (non-negative) number of compressed bytes output so far */ + @Override public synchronized long getBytesWritten() { checkStream(); return getBytesWritten(stream); @@ -395,11 +402,13 @@ public synchronized long getBytesWritten() { * * @return the total (non-negative) number of uncompressed bytes input so far */ + @Override public synchronized long getBytesRead() { checkStream(); return getBytesRead(stream); } + @Override public synchronized void reset() { checkStream(); reset(stream); @@ -413,6 +422,7 @@ public synchronized void reset() { userBufOff = userBufLen = 0; } + @Override public synchronized void end() { if (stream != 0) { end(stream); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java index 2db70551e8..ba67571998 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java @@ -118,6 +118,7 @@ public ZlibDecompressor() { this(CompressionHeader.DEFAULT_HEADER, DEFAULT_DIRECT_BUFFER_SIZE); } + @Override public synchronized void setInput(byte[] b, int off, int len) { if (b == null) { throw new NullPointerException(); @@ -154,6 +155,7 @@ synchronized void setInputFromSavedData() { userBufLen -= compressedDirectBufLen; } + @Override public synchronized void setDictionary(byte[] b, int off, int len) { if (stream == 0 || b == null) { throw new NullPointerException(); @@ -165,6 +167,7 @@ public synchronized void setDictionary(byte[] b, int off, int len) { needDict = false; } + @Override public synchronized boolean needsInput() { // Consume remaining compressed data? if (uncompressedDirectBuf.remaining() > 0) { @@ -184,16 +187,19 @@ public synchronized boolean needsInput() { return false; } + @Override public synchronized boolean needsDictionary() { return needDict; } + @Override public synchronized boolean finished() { // Check if 'zlib' says it's 'finished' and // all compressed data has been consumed return (finished && uncompressedDirectBuf.remaining() == 0); } + @Override public synchronized int decompress(byte[] b, int off, int len) throws IOException { if (b == null) { @@ -255,6 +261,7 @@ public synchronized long getBytesRead() { * * @return the total (non-negative) number of unprocessed bytes in input */ + @Override public synchronized int getRemaining() { checkStream(); return userBufLen + getRemaining(stream); // userBuf + compressedDirectBuf @@ -263,6 +270,7 @@ public synchronized int getRemaining() { /** * Resets everything including the input buffers (user and direct).
*/ + @Override public synchronized void reset() { checkStream(); reset(stream); @@ -274,6 +282,7 @@ public synchronized void reset() { userBufOff = userBufLen = 0; } + @Override public synchronized void end() { if (stream != 0) { end(stream); @@ -281,6 +290,7 @@ public synchronized void end() { } } + @Override protected void finalize() { end(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BCFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BCFile.java index 6b4fdd89aa..ce93266574 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BCFile.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BCFile.java @@ -300,6 +300,7 @@ public Writer(FSDataOutputStream fout, String compressionName, * Close the BCFile Writer. Attempting to use the Writer after calling * close is not allowed and may lead to undetermined results. */ + @Override public void close() throws IOException { if (closed == true) { return; @@ -447,6 +448,7 @@ private class MetaBlockRegister implements BlockRegister { this.compressAlgo = compressAlgo; } + @Override public void register(long raw, long begin, long end) { metaIndex.addEntry(new MetaIndexEntry(name, compressAlgo, new BlockRegion(begin, end - begin, raw))); @@ -463,6 +465,7 @@ private class DataBlockRegister implements BlockRegister { // do nothing } + @Override public void register(long raw, long begin, long end) { dataIndex.addBlockRegion(new BlockRegion(begin, end - begin, raw)); } @@ -671,6 +674,7 @@ public Version getAPIVersion() { /** * Finishing reading the BCFile. Release all resources. */ + @Override public void close() { // nothing to be done now } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/CompareUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/CompareUtils.java index a9cb1ec1c3..0808711f89 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/CompareUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/CompareUtils.java @@ -68,6 +68,7 @@ public ScalarLong(long m) { magnitude = m; } + @Override public long magnitude() { return magnitude; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java index 0b9ed9d2b3..9a57581c90 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java @@ -297,6 +297,7 @@ public Writer(FSDataOutputStream fsdos, int minBlockSize, * * The underlying FSDataOutputStream is not closed. */ + @Override public void close() throws IOException { if ((state == State.CLOSED)) { return; @@ -820,6 +821,7 @@ public Reader(FSDataInputStream fsdis, long fileLength, Configuration conf) * Close the reader. The state of the Reader object is undefined after * close. Calling close() for multiple times has no effect. */ + @Override public void close() throws IOException { readerBCF.close(); } @@ -1573,6 +1575,7 @@ private void parkCursorAtEnd() throws IOException { * scanner after calling close is not defined. The entry returned by the * previous entry() call will be invalid. 
*/ + @Override public void close() throws IOException { parkCursorAtEnd(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java index 2a7f883d95..4cfa0761ed 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java @@ -202,6 +202,7 @@ public static class Stat { this.mode = mode; } + @Override public String toString() { return "Stat(owner='" + owner + "', group='" + group + "'" + ", mode=" + mode + ")"; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIOException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIOException.java index 5064df5d86..db653b23f4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIOException.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIOException.java @@ -38,6 +38,7 @@ public Errno getErrno() { return errno; } + @Override public String toString() { return errno.toString() + ": " + super.getMessage(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java index 8b8387ce2c..5c29a33312 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java @@ -150,6 +150,7 @@ public static final RetryPolicy failoverOnNetworkException( } static class TryOnceThenFail implements RetryPolicy { + @Override public RetryAction shouldRetry(Exception e, int retries, int failovers, boolean isMethodIdempotent) throws Exception { return RetryAction.FAIL; @@ -157,6 +158,7 @@ public RetryAction shouldRetry(Exception e, int retries, int failovers, } static class RetryForever implements RetryPolicy { + @Override public RetryAction shouldRetry(Exception e, int retries, int failovers, boolean isMethodIdempotent) throws Exception { return RetryAction.RETRY; @@ -430,6 +432,7 @@ public ExceptionDependentRetry(RetryPolicy defaultPolicy, this.exceptionToPolicyMap = exceptionToPolicyMap; } + @Override public RetryAction shouldRetry(Exception e, int retries, int failovers, boolean isMethodIdempotent) throws Exception { RetryPolicy policy = exceptionToPolicyMap.get(e.getClass()); @@ -457,6 +460,7 @@ public RemoteExceptionDependentRetry(RetryPolicy defaultPolicy, } } + @Override public RetryAction shouldRetry(Exception e, int retries, int failovers, boolean isMethodIdempotent) throws Exception { RetryPolicy policy = null; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/DeserializerComparator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/DeserializerComparator.java index 7e74cb7732..05205c5523 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/DeserializerComparator.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/DeserializerComparator.java @@ -56,6 +56,7 @@ protected DeserializerComparator(Deserializer deserializer) 
this.deserializer.open(buffer); } + @Override public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { try { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerialization.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerialization.java index 61d6f171c9..f08d0008c6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerialization.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerialization.java @@ -24,11 +24,8 @@ import java.io.ObjectOutputStream; import java.io.OutputStream; import java.io.Serializable; -import java.util.Map; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.io.RawComparator; /** *
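The RetryPolicies hunks above annotate shouldRetry() in the simplest policies, TryOnceThenFail and RetryForever, which return a fixed decision (FAIL or RETRY) regardless of the failure. A self-contained sketch of that policy style with a bounded variant, using hypothetical types rather than Hadoop's RetryPolicy/RetryAction API:

// Hypothetical, simplified retry-policy types for illustration only.
interface SimpleRetryPolicy {
  enum Action { FAIL, RETRY }
  Action shouldRetry(Exception e, int retries);
}

class TryAtMost implements SimpleRetryPolicy {
  private final int maxRetries;

  TryAtMost(int maxRetries) {
    this.maxRetries = maxRetries;
  }

  @Override
  public Action shouldRetry(Exception e, int retries) {
    // Retry until the budget is spent, then give up.
    return (retries < maxRetries) ? Action.RETRY : Action.FAIL;
  }
}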
@@ -45,6 +42,7 @@ static class JavaSerializationDeserializer private ObjectInputStream ois; + @Override public void open(InputStream in) throws IOException { ois = new ObjectInputStream(in) { @Override protected void readStreamHeader() { @@ -53,6 +51,7 @@ public void open(InputStream in) throws IOException { }; } + @Override @SuppressWarnings("unchecked") public T deserialize(T object) throws IOException { try { @@ -63,6 +62,7 @@ public T deserialize(T object) throws IOException { } } + @Override public void close() throws IOException { ois.close(); } @@ -74,6 +74,7 @@ static class JavaSerializationSerializer private ObjectOutputStream oos; + @Override public void open(OutputStream out) throws IOException { oos = new ObjectOutputStream(out) { @Override protected void writeStreamHeader() { @@ -82,27 +83,32 @@ public void open(OutputStream out) throws IOException { }; } + @Override public void serialize(Serializable object) throws IOException { oos.reset(); // clear (class) back-references oos.writeObject(object); } + @Override public void close() throws IOException { oos.close(); } } + @Override @InterfaceAudience.Private public boolean accept(Class c) { return Serializable.class.isAssignableFrom(c); } + @Override @InterfaceAudience.Private public Deserializer getDeserializer(Class c) { return new JavaSerializationDeserializer(); } + @Override @InterfaceAudience.Private public Serializer getSerializer(Class c) { return new JavaSerializationSerializer(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerializationComparator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerializationComparator.java index 12927bea14..f9bf692f1f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerializationComparator.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerializationComparator.java @@ -44,6 +44,7 @@ public JavaSerializationComparator() throws IOException { super(new JavaSerialization.JavaSerializationDeserializer()); } + @Override @InterfaceAudience.Private public int compare(T o1, T o2) { return o1.compareTo(o2); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/WritableSerialization.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/WritableSerialization.java index 8511d25bcd..ad965d6b2f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/WritableSerialization.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/WritableSerialization.java @@ -23,8 +23,6 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.util.Map; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/avro/AvroSerialization.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/avro/AvroSerialization.java index 1d5c068886..f340cb3a98 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/avro/AvroSerialization.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/avro/AvroSerialization.java @@ -47,11 +47,13 @@ 
public abstract class AvroSerialization extends Configured @InterfaceAudience.Private public static final String AVRO_SCHEMA_KEY = "Avro-Schema"; + @Override @InterfaceAudience.Private public Deserializer getDeserializer(Class c) { return new AvroDeserializer(c); } + @Override @InterfaceAudience.Private public Serializer getSerializer(Class c) { return new AvroSerializer(c); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index b0f5c93f75..de7af1b6b0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -364,6 +364,7 @@ private void handleTimeout(SocketTimeoutException e) throws IOException { * until a byte is read. * @throws IOException for any IO problem other than socket timeout */ + @Override public int read() throws IOException { do { try { @@ -380,6 +381,7 @@ public int read() throws IOException { * * @return the total number of bytes read; -1 if the connection is closed. */ + @Override public int read(byte[] buf, int off, int len) throws IOException { do { try { @@ -510,6 +512,7 @@ private synchronized void handleSaslConnectionFailure( final Random rand, final UserGroupInformation ugi) throws IOException, InterruptedException { ugi.doAs(new PrivilegedExceptionAction() { + @Override public Object run() throws IOException, InterruptedException { final short MAX_BACKOFF = 5000; closeConnection(); @@ -803,6 +806,7 @@ private synchronized void sendPing() throws IOException { } } + @Override public void run() { if (LOG.isDebugEnabled()) LOG.debug(getName() + ": starting, having connections " diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java index 96ca97817a..3a33abe6a6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java @@ -240,6 +240,7 @@ public Object invoke(Object proxy, Method method, Object[] args) return returnMessage; } + @Override public void close() throws IOException { if (!isClosed) { isClosed = true; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolProxy.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolProxy.java index f15c0837dc..cc66958d14 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolProxy.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolProxy.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ipc; import java.io.IOException; -import java.lang.reflect.Field; import java.lang.reflect.Method; import java.util.HashSet; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolSignature.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolSignature.java index 1cc269f666..69d18eacd3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolSignature.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolSignature.java @@ -36,7 +36,8 @@ public class ProtocolSignature implements Writable { 
WritableFactories.setFactory (ProtocolSignature.class, new WritableFactory() { - public Writable newInstance() { return new ProtocolSignature(); } + @Override + public Writable newInstance() { return new ProtocolSignature(); } }); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index a3460ed9b0..bc3317120f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -517,6 +517,7 @@ private class Reader extends Thread { this.readSelector = Selector.open(); } + @Override public void run() { LOG.info("Starting " + getName()); try { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java index 965c77abc7..fc6fc6f8c5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java @@ -142,6 +142,7 @@ public long getRpcVersion() { return rpcVersion; } + @Override @SuppressWarnings("deprecation") public void readFields(DataInput in) throws IOException { rpcVersion = in.readLong(); @@ -159,6 +160,7 @@ public void readFields(DataInput in) throws IOException { } } + @Override @SuppressWarnings("deprecation") public void write(DataOutput out) throws IOException { out.writeLong(rpcVersion); @@ -173,6 +175,7 @@ public void write(DataOutput out) throws IOException { } } + @Override public String toString() { StringBuilder buffer = new StringBuilder(); buffer.append(methodName); @@ -189,10 +192,12 @@ public String toString() { return buffer.toString(); } + @Override public void setConf(Configuration conf) { this.conf = conf; } + @Override public Configuration getConf() { return this.conf; } @@ -215,6 +220,7 @@ public Invoker(Class protocol, this.client = CLIENTS.getClient(conf, factory); } + @Override public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { long startTime = 0; @@ -232,6 +238,7 @@ public Object invoke(Object proxy, Method method, Object[] args) } /* close the IPC client that's responsible for this invoker's RPCs */ + @Override synchronized public void close() { if (!isClosed) { isClosed = true; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java index 775279dfb4..5e2ed53255 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java @@ -88,6 +88,7 @@ private static void process(String urlstring) { public static class Servlet extends HttpServlet { private static final long serialVersionUID = 1L; + @Override public void doGet(HttpServletRequest request, HttpServletResponse response ) throws ServletException, IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java index c4fa203c61..841874fc08 100644 --- 
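The WritableRpcEngine.Invocation hunks above add @Override to readFields(DataInput) and write(DataOutput), the two methods of Hadoop's Writable contract; the ProtocolSignature WritableFactory registration just above serves the same serialization machinery. A minimal sketch of a Writable, with a hypothetical PairWritable class that is not part of this patch -- the one firm rule is that readFields() must consume fields in exactly the order write() produced them:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;

// Hypothetical example class; only the Writable interface comes from Hadoop.
class PairWritable implements Writable {
  private long version;
  private String name;

  @Override
  public void write(DataOutput out) throws IOException {
    out.writeLong(version);
    out.writeUTF(name);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    version = in.readLong();   // read back in the same order as written
    name = in.readUTF();
  }
}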
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java @@ -35,7 +35,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics.ContextFactory; -import org.apache.hadoop.metrics.MetricsException; import org.apache.hadoop.metrics.spi.AbstractMetricsContext; import org.apache.hadoop.metrics.spi.OutputRecord; import org.apache.hadoop.metrics.spi.Util; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/CompositeContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/CompositeContext.java index 1e2ee39814..60f5fec44a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/CompositeContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/CompositeContext.java @@ -30,7 +30,6 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics.ContextFactory; import org.apache.hadoop.metrics.MetricsContext; -import org.apache.hadoop.metrics.MetricsException; import org.apache.hadoop.metrics.MetricsRecord; import org.apache.hadoop.metrics.MetricsUtil; import org.apache.hadoop.metrics.Updater; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/NullContextWithUpdateThread.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/NullContextWithUpdateThread.java index 63f7ddaacd..ff2a49c69a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/NullContextWithUpdateThread.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/NullContextWithUpdateThread.java @@ -21,7 +21,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics.ContextFactory; -import org.apache.hadoop.metrics.MetricsException; /** * A null context which has a thread calling diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/OutputRecord.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/OutputRecord.java index fa2c09217f..d94c8ab46e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/OutputRecord.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/OutputRecord.java @@ -21,11 +21,7 @@ package org.apache.hadoop.metrics.spi; import java.util.Collections; -import java.util.Map; import java.util.Set; -import java.util.TreeMap; -import java.util.Map.Entry; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics.spi.AbstractMetricsContext.MetricMap; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/Util.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/Util.java index 166a846fdf..32aa431991 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/Util.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/Util.java @@ -22,7 +22,6 @@ package 
org.apache.hadoop.metrics.spi; import java.net.InetSocketAddress; -import java.net.SocketAddress; import java.util.ArrayList; import java.util.List; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsIntValue.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsIntValue.java index a84ebc9898..f7a173a2fb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsIntValue.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsIntValue.java @@ -19,8 +19,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics.MetricsRecord; -import org.apache.hadoop.util.StringUtils; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingInt.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingInt.java index 790397fd4b..e68d97e7b7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingInt.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingInt.java @@ -19,8 +19,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics.MetricsRecord; -import org.apache.hadoop.util.StringUtils; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingLong.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingLong.java index ad431395eb..7d0e1b2fe1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingLong.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingLong.java @@ -20,8 +20,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics.MetricsRecord; -import org.apache.hadoop.util.StringUtils; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingRate.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingRate.java index 73f7ef3011..db09248128 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingRate.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingRate.java @@ -19,8 +19,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics.MetricsRecord; -import org.apache.hadoop.util.StringUtils; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java index 230a9f00f0..e4b5580536 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java @@ -24,7 +24,6 @@ import java.net.URLClassLoader; import static java.security.AccessController.*; import java.security.PrivilegedAction; -import java.util.Arrays; import java.util.Iterator; import java.util.Locale; import java.util.Map; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/FileSink.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/FileSink.java index 929590f467..df1b008be1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/FileSink.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/FileSink.java @@ -18,7 +18,6 @@ package org.apache.hadoop.metrics2.sink; -import java.io.BufferedOutputStream; import java.io.File; import java.io.FileWriter; import java.io.PrintWriter; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java index 852f9132d3..f436a667f9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java @@ -23,7 +23,6 @@ import java.lang.management.MemoryUsage; import java.lang.management.ThreadInfo; import java.lang.management.ThreadMXBean; -import static java.lang.Thread.State.*; import java.lang.management.GarbageCollectorMXBean; import java.util.Map; import java.util.List; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/AbstractDNSToSwitchMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/AbstractDNSToSwitchMapping.java index b1f0fb230f..b2d803c95f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/AbstractDNSToSwitchMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/AbstractDNSToSwitchMapping.java @@ -24,7 +24,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; -import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java index 0a9ff7301b..73bbd4a631 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java @@ -30,7 +30,6 @@ import java.util.Collections; import java.util.Enumeration; import java.util.LinkedHashSet; -import java.util.Set; import java.util.Vector; import javax.naming.NamingException; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java index b3627ea264..046f0e7467 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java @@ -737,6 +737,7 @@ public int countNumOfAvailableNodes(String scope, } /** convert a network tree to a string */ + @Override public String 
toString() { // print the number of racks StringBuilder tree = new StringBuilder(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java index a41a42463c..b8502d016b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java @@ -26,7 +26,6 @@ import org.apache.hadoop.util.Shell.ShellCommandExecutor; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java index 62076b191b..ed12b3c6be 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java @@ -33,7 +33,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputStream.java index a0b0c3ed0f..46039a5506 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputStream.java @@ -50,6 +50,7 @@ private static class Reader extends SocketIOWithTimeout { this.channel = channel; } + @Override int performIO(ByteBuffer buf) throws IOException { return channel.read(buf); } @@ -123,10 +124,12 @@ public int read() throws IOException { return ret; } + @Override public int read(byte[] b, int off, int len) throws IOException { return read(ByteBuffer.wrap(b, off, len)); } + @Override public synchronized void close() throws IOException { /* close the channel since Socket.getInputStream().close() * closes the socket. 
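The SocketInputStream hunk above shows the delegation shape these streams use: the byte[] overload of read() wraps its arguments in a ByteBuffer and funnels everything through one channel-oriented read, and the new @Override annotations confirm each overload really overrides java.io.InputStream. A sketch of the same shape with a hypothetical ChannelBackedInputStream (not the Hadoop class, which also layers in selector-based timeouts):

import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

// Hypothetical, simplified example of the wrap-and-delegate pattern.
class ChannelBackedInputStream extends InputStream {
  private final ReadableByteChannel channel;

  ChannelBackedInputStream(ReadableByteChannel channel) {
    this.channel = channel;
  }

  @Override
  public int read() throws IOException {
    byte[] one = new byte[1];
    // Simplified: treats a zero-length read as end of stream.
    return (read(one, 0, 1) <= 0) ? -1 : (one[0] & 0xff);
  }

  @Override
  public int read(byte[] b, int off, int len) throws IOException {
    // Same move as the patch: wrap the byte[] and delegate to the channel path.
    return read(ByteBuffer.wrap(b, off, len));
  }

  private int read(ByteBuffer dst) throws IOException {
    return channel.read(dst);
  }

  @Override
  public void close() throws IOException {
    channel.close();   // for a socket channel this closes the socket too
  }
}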
@@ -146,10 +149,12 @@ public ReadableByteChannel getChannel() { //ReadableByteChannel interface + @Override public boolean isOpen() { return reader.isOpen(); } + @Override public int read(ByteBuffer dst) throws IOException { return reader.doIO(dst, SelectionKey.OP_READ); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketOutputStream.java index e7bfadeeda..091c684059 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketOutputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketOutputStream.java @@ -58,6 +58,7 @@ private static class Writer extends SocketIOWithTimeout { this.channel = channel; } + @Override int performIO(ByteBuffer buf) throws IOException { return channel.write(buf); } @@ -98,6 +99,7 @@ public SocketOutputStream(Socket socket, long timeout) this(socket.getChannel(), timeout); } + @Override public void write(int b) throws IOException { /* If we need to, we can optimize this allocation. * probably no need to optimize or encourage single byte writes. @@ -107,6 +109,7 @@ public void write(int b) throws IOException { write(buf, 0, 1); } + @Override public void write(byte[] b, int off, int len) throws IOException { ByteBuffer buf = ByteBuffer.wrap(b, off, len); while (buf.hasRemaining()) { @@ -126,6 +129,7 @@ public void write(byte[] b, int off, int len) throws IOException { } } + @Override public synchronized void close() throws IOException { /* close the channel since Socket.getOuputStream().close() * closes the socket. @@ -145,10 +149,12 @@ public WritableByteChannel getChannel() { //WritableByteChannle interface + @Override public boolean isOpen() { return writer.isOpen(); } + @Override public int write(ByteBuffer src) throws IOException { return writer.doIO(src, SelectionKey.OP_WRITE); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocksSocketFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocksSocketFactory.java index 0192aa4cbc..6b84f9d2cf 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocksSocketFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocksSocketFactory.java @@ -59,14 +59,12 @@ public SocksSocketFactory(Proxy proxy) { this.proxy = proxy; } - /* @inheritDoc */ @Override public Socket createSocket() throws IOException { return new Socket(proxy); } - /* @inheritDoc */ @Override public Socket createSocket(InetAddress addr, int port) throws IOException { @@ -75,7 +73,6 @@ public Socket createSocket(InetAddress addr, int port) throws IOException { return socket; } - /* @inheritDoc */ @Override public Socket createSocket(InetAddress addr, int port, InetAddress localHostAddr, int localPort) throws IOException { @@ -86,7 +83,6 @@ public Socket createSocket(InetAddress addr, int port, return socket; } - /* @inheritDoc */ @Override public Socket createSocket(String host, int port) throws IOException, UnknownHostException { @@ -96,7 +92,6 @@ public Socket createSocket(String host, int port) throws IOException, return socket; } - /* @inheritDoc */ @Override public Socket createSocket(String host, int port, InetAddress localHostAddr, int localPort) throws IOException, @@ -108,13 +103,11 @@ public Socket createSocket(String host, int port, return socket; } - /* @inheritDoc */ @Override public int 
hashCode() { return proxy.hashCode(); } - /* @inheritDoc */ @Override public boolean equals(Object obj) { if (this == obj) @@ -132,12 +125,12 @@ public boolean equals(Object obj) { return true; } - /* @inheritDoc */ + @Override public Configuration getConf() { return this.conf; } - /* @inheritDoc */ + @Override public void setConf(Configuration conf) { this.conf = conf; String proxyStr = conf.get("hadoop.socks.server"); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/StandardSocketFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/StandardSocketFactory.java index f4942cef26..ac38819360 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/StandardSocketFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/StandardSocketFactory.java @@ -42,7 +42,6 @@ public class StandardSocketFactory extends SocketFactory { public StandardSocketFactory() { } - /* @inheritDoc */ @Override public Socket createSocket() throws IOException { /* @@ -63,7 +62,6 @@ public Socket createSocket() throws IOException { return SocketChannel.open().socket(); } - /* @inheritDoc */ @Override public Socket createSocket(InetAddress addr, int port) throws IOException { @@ -72,7 +70,6 @@ public Socket createSocket(InetAddress addr, int port) throws IOException { return socket; } - /* @inheritDoc */ @Override public Socket createSocket(InetAddress addr, int port, InetAddress localHostAddr, int localPort) throws IOException { @@ -83,7 +80,6 @@ public Socket createSocket(InetAddress addr, int port, return socket; } - /* @inheritDoc */ @Override public Socket createSocket(String host, int port) throws IOException, UnknownHostException { @@ -93,7 +89,6 @@ public Socket createSocket(String host, int port) throws IOException, return socket; } - /* @inheritDoc */ @Override public Socket createSocket(String host, int port, InetAddress localHostAddr, int localPort) throws IOException, @@ -105,7 +100,6 @@ public Socket createSocket(String host, int port, return socket; } - /* @inheritDoc */ @Override public boolean equals(Object obj) { if (this == obj) @@ -115,7 +109,6 @@ public boolean equals(Object obj) { return obj.getClass().equals(this.getClass()); } - /* @inheritDoc */ @Override public int hashCode() { return this.getClass().hashCode(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java index 277432bf14..b245c80969 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java @@ -125,6 +125,7 @@ private synchronized void load() { } } + @Override public synchronized List resolve(List names) { if (!initialized) { initialized = true; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/BinaryRecordInput.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/BinaryRecordInput.java index d4c60e08d6..5c302e55e3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/BinaryRecordInput.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/BinaryRecordInput.java @@ -41,9 +41,11 @@ static private class BinaryIndex implements Index { private BinaryIndex(int nelems) { this.nelems = nelems; } + @Override 
public boolean done() { return (nelems <= 0); } + @Override public void incr() { nelems--; } @@ -56,6 +58,7 @@ private void setDataInput(DataInput inp) { } private static ThreadLocal bIn = new ThreadLocal() { + @Override protected synchronized Object initialValue() { return new BinaryRecordInput(); } @@ -82,34 +85,42 @@ public BinaryRecordInput(DataInput din) { this.in = din; } + @Override public byte readByte(final String tag) throws IOException { return in.readByte(); } + @Override public boolean readBool(final String tag) throws IOException { return in.readBoolean(); } + @Override public int readInt(final String tag) throws IOException { return Utils.readVInt(in); } + @Override public long readLong(final String tag) throws IOException { return Utils.readVLong(in); } + @Override public float readFloat(final String tag) throws IOException { return in.readFloat(); } + @Override public double readDouble(final String tag) throws IOException { return in.readDouble(); } + @Override public String readString(final String tag) throws IOException { return Utils.fromBinaryString(in); } + @Override public Buffer readBuffer(final String tag) throws IOException { final int len = Utils.readVInt(in); final byte[] barr = new byte[len]; @@ -117,26 +128,32 @@ public Buffer readBuffer(final String tag) throws IOException { return new Buffer(barr); } + @Override public void startRecord(final String tag) throws IOException { // no-op } + @Override public void endRecord(final String tag) throws IOException { // no-op } + @Override public Index startVector(final String tag) throws IOException { return new BinaryIndex(readInt(tag)); } + @Override public void endVector(final String tag) throws IOException { // no-op } + @Override public Index startMap(final String tag) throws IOException { return new BinaryIndex(readInt(tag)); } + @Override public void endMap(final String tag) throws IOException { // no-op } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/BinaryRecordOutput.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/BinaryRecordOutput.java index 699f635e68..aa6b8e95c5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/BinaryRecordOutput.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/BinaryRecordOutput.java @@ -45,6 +45,7 @@ private void setDataOutput(DataOutput out) { } private static ThreadLocal bOut = new ThreadLocal() { + @Override protected synchronized Object initialValue() { return new BinaryRecordOutput(); } @@ -72,34 +73,42 @@ public BinaryRecordOutput(DataOutput out) { } + @Override public void writeByte(byte b, String tag) throws IOException { out.writeByte(b); } + @Override public void writeBool(boolean b, String tag) throws IOException { out.writeBoolean(b); } + @Override public void writeInt(int i, String tag) throws IOException { Utils.writeVInt(out, i); } + @Override public void writeLong(long l, String tag) throws IOException { Utils.writeVLong(out, l); } + @Override public void writeFloat(float f, String tag) throws IOException { out.writeFloat(f); } + @Override public void writeDouble(double d, String tag) throws IOException { out.writeDouble(d); } + @Override public void writeString(String s, String tag) throws IOException { Utils.toBinaryString(out, s); } + @Override public void writeBuffer(Buffer buf, String tag) throws IOException { byte[] barr = buf.get(); @@ -108,20 +117,26 @@ public void writeBuffer(Buffer buf, String tag) 
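BinaryRecordInput and BinaryRecordOutput above keep one reusable instance per thread via an anonymous ThreadLocal whose initialValue() is overridden -- another spot where the new @Override annotations apply. A minimal sketch of that per-thread-instance pattern with a hypothetical helper (the original uses a raw, pre-generics ThreadLocal):

// Hypothetical helper illustrating the ThreadLocal initialValue() pattern.
final class PerThreadScratch {
  private static final ThreadLocal<StringBuilder> SCRATCH =
      new ThreadLocal<StringBuilder>() {
        @Override
        protected StringBuilder initialValue() {
          return new StringBuilder(64);   // built once per thread, on first use
        }
      };

  static StringBuilder get() {
    StringBuilder sb = SCRATCH.get();     // always this thread's own instance
    sb.setLength(0);                      // reset before reuse
    return sb;
  }

  private PerThreadScratch() {}
}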
out.write(barr, 0, len); } + @Override public void startRecord(Record r, String tag) throws IOException {} + @Override public void endRecord(Record r, String tag) throws IOException {} + @Override public void startVector(ArrayList v, String tag) throws IOException { writeInt(v.size(), tag); } + @Override public void endVector(ArrayList v, String tag) throws IOException {} + @Override public void startMap(TreeMap v, String tag) throws IOException { writeInt(v.size(), tag); } + @Override public void endMap(TreeMap v, String tag) throws IOException {} } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Buffer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Buffer.java index 5f6630852b..eb569271d2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Buffer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Buffer.java @@ -187,6 +187,7 @@ public void append(byte[] bytes) { } // inherit javadoc + @Override public int hashCode() { int hash = 1; byte[] b = this.get(); @@ -202,6 +203,7 @@ public int hashCode() { * @return Positive if this is bigger than other, 0 if they are equal, and * negative if this is smaller than other. */ + @Override public int compareTo(Object other) { Buffer right = ((Buffer) other); byte[] lb = this.get(); @@ -217,6 +219,7 @@ public int compareTo(Object other) { } // inherit javadoc + @Override public boolean equals(Object other) { if (other instanceof Buffer && this != other) { return compareTo(other) == 0; @@ -225,6 +228,7 @@ public boolean equals(Object other) { } // inheric javadoc + @Override public String toString() { StringBuilder sb = new StringBuilder(2*count); for(int idx = 0; idx < count; idx++) { @@ -245,6 +249,7 @@ public String toString(String charsetName) } // inherit javadoc + @Override public Object clone() throws CloneNotSupportedException { Buffer result = (Buffer) super.clone(); result.copy(this.get(), 0, this.getCount()); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/CsvRecordInput.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/CsvRecordInput.java index 44093573c9..e9fa0c35b8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/CsvRecordInput.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/CsvRecordInput.java @@ -38,6 +38,7 @@ public class CsvRecordInput implements RecordInput { private PushbackReader stream; private class CsvIndex implements Index { + @Override public boolean done() { char c = '\0'; try { @@ -47,6 +48,7 @@ public boolean done() { } return (c == '}') ? true : false; } + @Override public void incr() {} } @@ -85,19 +87,23 @@ public CsvRecordInput(InputStream in) { } } + @Override public byte readByte(String tag) throws IOException { return (byte) readLong(tag); } + @Override public boolean readBool(String tag) throws IOException { String sval = readField(tag); return "T".equals(sval) ? 
true : false; } + @Override public int readInt(String tag) throws IOException { return (int) readLong(tag); } + @Override public long readLong(String tag) throws IOException { String sval = readField(tag); try { @@ -108,10 +114,12 @@ public long readLong(String tag) throws IOException { } } + @Override public float readFloat(String tag) throws IOException { return (float) readDouble(tag); } + @Override public double readDouble(String tag) throws IOException { String sval = readField(tag); try { @@ -122,16 +130,19 @@ public double readDouble(String tag) throws IOException { } } + @Override public String readString(String tag) throws IOException { String sval = readField(tag); return Utils.fromCSVString(sval); } + @Override public Buffer readBuffer(String tag) throws IOException { String sval = readField(tag); return Utils.fromCSVBuffer(sval); } + @Override public void startRecord(String tag) throws IOException { if (tag != null && !"".equals(tag)) { char c1 = (char) stream.read(); @@ -142,6 +153,7 @@ public void startRecord(String tag) throws IOException { } } + @Override public void endRecord(String tag) throws IOException { char c = (char) stream.read(); if (tag == null || "".equals(tag)) { @@ -163,6 +175,7 @@ public void endRecord(String tag) throws IOException { return; } + @Override public Index startVector(String tag) throws IOException { char c1 = (char) stream.read(); char c2 = (char) stream.read(); @@ -172,6 +185,7 @@ public Index startVector(String tag) throws IOException { return new CsvIndex(); } + @Override public void endVector(String tag) throws IOException { char c = (char) stream.read(); if (c != '}') { @@ -184,6 +198,7 @@ public void endVector(String tag) throws IOException { return; } + @Override public Index startMap(String tag) throws IOException { char c1 = (char) stream.read(); char c2 = (char) stream.read(); @@ -193,6 +208,7 @@ public Index startMap(String tag) throws IOException { return new CsvIndex(); } + @Override public void endMap(String tag) throws IOException { char c = (char) stream.read(); if (c != '}') { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/CsvRecordOutput.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/CsvRecordOutput.java index a638d0bcc9..d770f47cf5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/CsvRecordOutput.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/CsvRecordOutput.java @@ -61,10 +61,12 @@ public CsvRecordOutput(OutputStream out) { } } + @Override public void writeByte(byte b, String tag) throws IOException { writeLong((long)b, tag); } + @Override public void writeBool(boolean b, String tag) throws IOException { printCommaUnlessFirst(); String val = b ? 
"T" : "F"; @@ -72,32 +74,38 @@ public void writeBool(boolean b, String tag) throws IOException { throwExceptionOnError(tag); } + @Override public void writeInt(int i, String tag) throws IOException { writeLong((long)i, tag); } + @Override public void writeLong(long l, String tag) throws IOException { printCommaUnlessFirst(); stream.print(l); throwExceptionOnError(tag); } + @Override public void writeFloat(float f, String tag) throws IOException { writeDouble((double)f, tag); } + @Override public void writeDouble(double d, String tag) throws IOException { printCommaUnlessFirst(); stream.print(d); throwExceptionOnError(tag); } + @Override public void writeString(String s, String tag) throws IOException { printCommaUnlessFirst(); stream.print(Utils.toCSVString(s)); throwExceptionOnError(tag); } + @Override public void writeBuffer(Buffer buf, String tag) throws IOException { printCommaUnlessFirst(); @@ -105,6 +113,7 @@ public void writeBuffer(Buffer buf, String tag) throwExceptionOnError(tag); } + @Override public void startRecord(Record r, String tag) throws IOException { if (tag != null && !"".equals(tag)) { printCommaUnlessFirst(); @@ -113,6 +122,7 @@ public void startRecord(Record r, String tag) throws IOException { } } + @Override public void endRecord(Record r, String tag) throws IOException { if (tag == null || "".equals(tag)) { stream.print("\n"); @@ -123,23 +133,27 @@ public void endRecord(Record r, String tag) throws IOException { } } + @Override public void startVector(ArrayList v, String tag) throws IOException { printCommaUnlessFirst(); stream.print("v{"); isFirst = true; } + @Override public void endVector(ArrayList v, String tag) throws IOException { stream.print("}"); isFirst = false; } + @Override public void startMap(TreeMap v, String tag) throws IOException { printCommaUnlessFirst(); stream.print("m{"); isFirst = true; } + @Override public void endMap(TreeMap v, String tag) throws IOException { stream.print("}"); isFirst = false; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Record.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Record.java index df0b6c2c85..f0ec99ad81 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Record.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Record.java @@ -54,6 +54,7 @@ public abstract void deserialize(RecordInput rin, String tag) throws IOException; // inheric javadoc + @Override public abstract int compareTo (final Object peer) throws ClassCastException; /** @@ -73,18 +74,21 @@ public void deserialize(RecordInput rin) throws IOException { } // inherit javadoc + @Override public void write(final DataOutput out) throws java.io.IOException { BinaryRecordOutput bout = BinaryRecordOutput.get(out); this.serialize(bout); } // inherit javadoc + @Override public void readFields(final DataInput din) throws java.io.IOException { BinaryRecordInput rin = BinaryRecordInput.get(din); this.deserialize(rin); } // inherit javadoc + @Override public String toString() { try { ByteArrayOutputStream s = new ByteArrayOutputStream(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/RecordComparator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/RecordComparator.java index 2c86804c14..805d93160a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/RecordComparator.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/RecordComparator.java @@ -40,6 +40,7 @@ protected RecordComparator(Class recordClass) { } // inheric JavaDoc + @Override public abstract int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2); /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/XmlRecordInput.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/XmlRecordInput.java index 5e77a4552b..871e04bff0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/XmlRecordInput.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/XmlRecordInput.java @@ -63,10 +63,13 @@ private XMLParser(ArrayList vlist) { valList = vlist; } + @Override public void startDocument() throws SAXException {} + @Override public void endDocument() throws SAXException {} + @Override public void startElement(String ns, String sname, String qname, @@ -88,6 +91,7 @@ public void startElement(String ns, } } + @Override public void endElement(String ns, String sname, String qname) throws SAXException { @@ -98,6 +102,7 @@ public void endElement(String ns, } } + @Override public void characters(char buf[], int offset, int len) throws SAXException { if (charsValid) { @@ -109,6 +114,7 @@ public void characters(char buf[], int offset, int len) } private class XmlIndex implements Index { + @Override public boolean done() { Value v = valList.get(vIdx); if ("/array".equals(v.getType())) { @@ -119,6 +125,7 @@ public boolean done() { return false; } } + @Override public void incr() {} } @@ -152,6 +159,7 @@ public XmlRecordInput(InputStream in) { } } + @Override public byte readByte(String tag) throws IOException { Value v = next(); if (!"ex:i1".equals(v.getType())) { @@ -160,6 +168,7 @@ public byte readByte(String tag) throws IOException { return Byte.parseByte(v.getValue()); } + @Override public boolean readBool(String tag) throws IOException { Value v = next(); if (!"boolean".equals(v.getType())) { @@ -168,6 +177,7 @@ public boolean readBool(String tag) throws IOException { return "1".equals(v.getValue()); } + @Override public int readInt(String tag) throws IOException { Value v = next(); if (!"i4".equals(v.getType()) && @@ -177,6 +187,7 @@ public int readInt(String tag) throws IOException { return Integer.parseInt(v.getValue()); } + @Override public long readLong(String tag) throws IOException { Value v = next(); if (!"ex:i8".equals(v.getType())) { @@ -185,6 +196,7 @@ public long readLong(String tag) throws IOException { return Long.parseLong(v.getValue()); } + @Override public float readFloat(String tag) throws IOException { Value v = next(); if (!"ex:float".equals(v.getType())) { @@ -193,6 +205,7 @@ public float readFloat(String tag) throws IOException { return Float.parseFloat(v.getValue()); } + @Override public double readDouble(String tag) throws IOException { Value v = next(); if (!"double".equals(v.getType())) { @@ -201,6 +214,7 @@ public double readDouble(String tag) throws IOException { return Double.parseDouble(v.getValue()); } + @Override public String readString(String tag) throws IOException { Value v = next(); if (!"string".equals(v.getType())) { @@ -209,6 +223,7 @@ public String readString(String tag) throws IOException { return Utils.fromXMLString(v.getValue()); } + @Override public Buffer readBuffer(String tag) throws IOException { Value v = next(); if (!"string".equals(v.getType())) { @@ -217,6 +232,7 @@ public Buffer readBuffer(String tag) 
throws IOException { return Utils.fromXMLBuffer(v.getValue()); } + @Override public void startRecord(String tag) throws IOException { Value v = next(); if (!"struct".equals(v.getType())) { @@ -224,6 +240,7 @@ public void startRecord(String tag) throws IOException { } } + @Override public void endRecord(String tag) throws IOException { Value v = next(); if (!"/struct".equals(v.getType())) { @@ -231,6 +248,7 @@ public void endRecord(String tag) throws IOException { } } + @Override public Index startVector(String tag) throws IOException { Value v = next(); if (!"array".equals(v.getType())) { @@ -239,12 +257,15 @@ public Index startVector(String tag) throws IOException { return new XmlIndex(); } + @Override public void endVector(String tag) throws IOException {} + @Override public Index startMap(String tag) throws IOException { return startVector(tag); } + @Override public void endMap(String tag) throws IOException { endVector(tag); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/XmlRecordOutput.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/XmlRecordOutput.java index adf28a2fdf..9cf85537ed 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/XmlRecordOutput.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/XmlRecordOutput.java @@ -149,6 +149,7 @@ public XmlRecordOutput(OutputStream out) { } } + @Override public void writeByte(byte b, String tag) throws IOException { printBeginEnvelope(tag); stream.print(""); @@ -157,6 +158,7 @@ public void writeByte(byte b, String tag) throws IOException { printEndEnvelope(tag); } + @Override public void writeBool(boolean b, String tag) throws IOException { printBeginEnvelope(tag); stream.print(""); @@ -165,6 +167,7 @@ public void writeBool(boolean b, String tag) throws IOException { printEndEnvelope(tag); } + @Override public void writeInt(int i, String tag) throws IOException { printBeginEnvelope(tag); stream.print(""); @@ -173,6 +176,7 @@ public void writeInt(int i, String tag) throws IOException { printEndEnvelope(tag); } + @Override public void writeLong(long l, String tag) throws IOException { printBeginEnvelope(tag); stream.print(""); @@ -181,6 +185,7 @@ public void writeLong(long l, String tag) throws IOException { printEndEnvelope(tag); } + @Override public void writeFloat(float f, String tag) throws IOException { printBeginEnvelope(tag); stream.print(""); @@ -189,6 +194,7 @@ public void writeFloat(float f, String tag) throws IOException { printEndEnvelope(tag); } + @Override public void writeDouble(double d, String tag) throws IOException { printBeginEnvelope(tag); stream.print(""); @@ -197,6 +203,7 @@ public void writeDouble(double d, String tag) throws IOException { printEndEnvelope(tag); } + @Override public void writeString(String s, String tag) throws IOException { printBeginEnvelope(tag); stream.print(""); @@ -205,6 +212,7 @@ public void writeString(String s, String tag) throws IOException { printEndEnvelope(tag); } + @Override public void writeBuffer(Buffer buf, String tag) throws IOException { printBeginEnvelope(tag); @@ -214,12 +222,14 @@ public void writeBuffer(Buffer buf, String tag) printEndEnvelope(tag); } + @Override public void startRecord(Record r, String tag) throws IOException { insideRecord(tag); stream.print("\n"); addIndent(); } + @Override public void endRecord(Record r, String tag) throws IOException { closeIndent(); putIndent(); @@ -227,12 +237,14 @@ public void endRecord(Record 
r, String tag) throws IOException { outsideRecord(tag); } + @Override public void startVector(ArrayList v, String tag) throws IOException { insideVector(tag); stream.print("\n"); addIndent(); } + @Override public void endVector(ArrayList v, String tag) throws IOException { closeIndent(); putIndent(); @@ -240,12 +252,14 @@ public void endVector(ArrayList v, String tag) throws IOException { outsideVector(tag); } + @Override public void startMap(TreeMap v, String tag) throws IOException { insideMap(tag); stream.print("\n"); addIndent(); } + @Override public void endMap(TreeMap v, String tag) throws IOException { closeIndent(); putIndent(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CGenerator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CGenerator.java index b62b62924b..69ab37a152 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CGenerator.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CGenerator.java @@ -37,6 +37,7 @@ class CGenerator extends CodeGenerator { * and spits-out file-level elements (such as include statements etc.) * record-level code is generated by JRecord. */ + @Override void genCode(String name, ArrayList ilist, ArrayList rlist, String destDir, ArrayList options) throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CodeBuffer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CodeBuffer.java index a18871c74a..ec4d5df981 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CodeBuffer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CodeBuffer.java @@ -98,6 +98,7 @@ private void rawAppend(char ch) { sb.append(ch); } + @Override public String toString() { return sb.toString(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/Consts.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/Consts.java index 7f3e8d74df..2a186fa283 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/Consts.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/Consts.java @@ -18,12 +18,8 @@ package org.apache.hadoop.record.compiler; -import java.io.IOException; -import java.util.Iterator; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.record.RecordInput; /** * const definitions for Record I/O compiler diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CppGenerator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CppGenerator.java index e1fb599c04..1c97a48d53 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CppGenerator.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CppGenerator.java @@ -37,6 +37,7 @@ class CppGenerator extends CodeGenerator { * and spits-out file-level elements (such as include statements etc.) * record-level code is generated by JRecord. 
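The XmlRecordInput.XMLParser hunks above add @Override to what look like standard SAX callbacks (startDocument, startElement, endElement, characters), so the compiler now checks each one against the handler superclass. A small, self-contained sketch of that callback style using a hypothetical ElementCountingHandler built on org.xml.sax.helpers.DefaultHandler (an assumption -- the patch does not show which class XMLParser extends):

import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;

// Hypothetical handler; counts elements instead of building record values.
class ElementCountingHandler extends DefaultHandler {
  private int elements;

  @Override
  public void startElement(String uri, String localName, String qName,
                           Attributes attributes) throws SAXException {
    elements++;                       // one for every opened element
  }

  @Override
  public void endDocument() throws SAXException {
    System.out.println("elements seen: " + elements);
  }
}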
*/ + @Override void genCode(String name, ArrayList ilist, ArrayList rlist, String destDir, ArrayList options) throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JBoolean.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JBoolean.java index 77791f2723..95dc7334f7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JBoolean.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JBoolean.java @@ -36,20 +36,24 @@ class JavaBoolean extends JType.JavaType { super("boolean", "Bool", "Boolean", "TypeID.RIOType.BOOL"); } + @Override void genCompareTo(CodeBuffer cb, String fname, String other) { cb.append(Consts.RIO_PREFIX + "ret = ("+fname+" == "+other+")? 0 : ("+ fname+"?1:-1);\n"); } + @Override String getTypeIDObjectString() { return "org.apache.hadoop.record.meta.TypeID.BoolTypeID"; } + @Override void genHashCode(CodeBuffer cb, String fname) { cb.append(Consts.RIO_PREFIX + "ret = ("+fname+")?0:1;\n"); } // In Binary format, boolean is written as byte. true = 1, false = 0 + @Override void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("{\n"); cb.append("if ("+l+"<1) {\n"); @@ -61,6 +65,7 @@ void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { } // In Binary format, boolean is written as byte. true = 1, false = 0 + @Override void genCompareBytes(CodeBuffer cb) { cb.append("{\n"); cb.append("if (l1<1 || l2<1) {\n"); @@ -81,6 +86,7 @@ class CppBoolean extends CppType { super("bool"); } + @Override String getTypeIDObjectString() { return "new ::hadoop::TypeID(::hadoop::RIOTYPE_BOOL)"; } @@ -93,6 +99,7 @@ public JBoolean() { setCType(new CType()); } + @Override String getSignature() { return "z"; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JBuffer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JBuffer.java index 53b8264e3e..7c4a670607 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JBuffer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JBuffer.java @@ -39,22 +39,27 @@ class JavaBuffer extends JavaCompType { "org.apache.hadoop.record.Buffer", "TypeID.RIOType.BUFFER"); } + @Override String getTypeIDObjectString() { return "org.apache.hadoop.record.meta.TypeID.BufferTypeID"; } + @Override void genCompareTo(CodeBuffer cb, String fname, String other) { cb.append(Consts.RIO_PREFIX + "ret = "+fname+".compareTo("+other+");\n"); } + @Override void genEquals(CodeBuffer cb, String fname, String peer) { cb.append(Consts.RIO_PREFIX + "ret = "+fname+".equals("+peer+");\n"); } + @Override void genHashCode(CodeBuffer cb, String fname) { cb.append(Consts.RIO_PREFIX + "ret = "+fname+".hashCode();\n"); } + @Override void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("{\n"); cb.append("int i = org.apache.hadoop.record.Utils.readVInt("+ @@ -64,6 +69,7 @@ void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("}\n"); } + @Override void genCompareBytes(CodeBuffer cb) { cb.append("{\n"); cb.append("int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);\n"); @@ -84,6 +90,7 @@ class CppBuffer extends CppCompType { super(" ::std::string"); } + @Override void genGetSet(CodeBuffer cb, String fname) { cb.append("virtual const "+getType()+"& 
get"+toCamelCase(fname)+"() const {\n"); cb.append("return "+fname+";\n"); @@ -93,6 +100,7 @@ void genGetSet(CodeBuffer cb, String fname) { cb.append("}\n"); } + @Override String getTypeIDObjectString() { return "new ::hadoop::TypeID(::hadoop::RIOTYPE_BUFFER)"; } @@ -105,6 +113,7 @@ public JBuffer() { setCType(new CCompType()); } + @Override String getSignature() { return "B"; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JByte.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JByte.java index 96f191d5fb..1ac7171f24 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JByte.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JByte.java @@ -37,10 +37,12 @@ class JavaByte extends JavaType { super("byte", "Byte", "Byte", "TypeID.RIOType.BYTE"); } + @Override String getTypeIDObjectString() { return "org.apache.hadoop.record.meta.TypeID.ByteTypeID"; } + @Override void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("{\n"); cb.append("if ("+l+"<1) {\n"); @@ -51,6 +53,7 @@ void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("}\n"); } + @Override void genCompareBytes(CodeBuffer cb) { cb.append("{\n"); cb.append("if (l1<1 || l2<1) {\n"); @@ -71,6 +74,7 @@ class CppByte extends CppType { super("int8_t"); } + @Override String getTypeIDObjectString() { return "new ::hadoop::TypeID(::hadoop::RIOTYPE_BYTE)"; } @@ -82,6 +86,7 @@ public JByte() { setCType(new CType()); } + @Override String getSignature() { return "b"; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JCompType.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JCompType.java index f71d97d50c..5f41aeb56f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JCompType.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JCompType.java @@ -35,18 +35,22 @@ abstract class JavaCompType extends JavaType { super(type, suffix, wrapper, typeIDByteString); } + @Override void genCompareTo(CodeBuffer cb, String fname, String other) { cb.append(Consts.RIO_PREFIX + "ret = "+fname+".compareTo("+other+");\n"); } + @Override void genEquals(CodeBuffer cb, String fname, String peer) { cb.append(Consts.RIO_PREFIX + "ret = "+fname+".equals("+peer+");\n"); } + @Override void genHashCode(CodeBuffer cb, String fname) { cb.append(Consts.RIO_PREFIX + "ret = "+fname+".hashCode();\n"); } + @Override void genClone(CodeBuffer cb, String fname) { cb.append(Consts.RIO_PREFIX + "other."+fname+" = ("+getType()+") this."+ fname+".clone();\n"); @@ -59,6 +63,7 @@ abstract class CppCompType extends CppType { super(type); } + @Override void genGetSet(CodeBuffer cb, String fname) { cb.append("virtual const "+getType()+"& get"+toCamelCase(fname)+"() const {\n"); cb.append("return "+fname+";\n"); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JDouble.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JDouble.java index cdab26341d..5e1a65fc07 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JDouble.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JDouble.java @@ -36,15 +36,18 @@ class JavaDouble extends 
JavaType { super("double", "Double", "Double", "TypeID.RIOType.DOUBLE"); } + @Override String getTypeIDObjectString() { return "org.apache.hadoop.record.meta.TypeID.DoubleTypeID"; } + @Override void genHashCode(CodeBuffer cb, String fname) { String tmp = "Double.doubleToLongBits("+fname+")"; cb.append(Consts.RIO_PREFIX + "ret = (int)("+tmp+"^("+tmp+">>>32));\n"); } + @Override void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("{\n"); cb.append("if ("+l+"<8) {\n"); @@ -55,6 +58,7 @@ void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("}\n"); } + @Override void genCompareBytes(CodeBuffer cb) { cb.append("{\n"); cb.append("if (l1<8 || l2<8) {\n"); @@ -77,6 +81,7 @@ class CppDouble extends CppType { super("double"); } + @Override String getTypeIDObjectString() { return "new ::hadoop::TypeID(::hadoop::RIOTYPE_DOUBLE)"; } @@ -90,6 +95,7 @@ public JDouble() { setCType(new CType()); } + @Override String getSignature() { return "d"; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JFloat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JFloat.java index 1081651be6..10aa69ad2f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JFloat.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JFloat.java @@ -35,14 +35,17 @@ class JavaFloat extends JavaType { super("float", "Float", "Float", "TypeID.RIOType.FLOAT"); } + @Override String getTypeIDObjectString() { return "org.apache.hadoop.record.meta.TypeID.FloatTypeID"; } + @Override void genHashCode(CodeBuffer cb, String fname) { cb.append(Consts.RIO_PREFIX + "ret = Float.floatToIntBits("+fname+");\n"); } + @Override void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("{\n"); cb.append("if ("+l+"<4) {\n"); @@ -53,6 +56,7 @@ void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("}\n"); } + @Override void genCompareBytes(CodeBuffer cb) { cb.append("{\n"); cb.append("if (l1<4 || l2<4) {\n"); @@ -75,6 +79,7 @@ class CppFloat extends CppType { super("float"); } + @Override String getTypeIDObjectString() { return "new ::hadoop::TypeID(::hadoop::RIOTYPE_FLOAT)"; } @@ -87,6 +92,7 @@ public JFloat() { setCType(new CType()); } + @Override String getSignature() { return "f"; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JInt.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JInt.java index b1303e44bd..d18445ff55 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JInt.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JInt.java @@ -38,10 +38,12 @@ class JavaInt extends JavaType { super("int", "Int", "Integer", "TypeID.RIOType.INT"); } + @Override String getTypeIDObjectString() { return "org.apache.hadoop.record.meta.TypeID.IntTypeID"; } + @Override void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("{\n"); cb.append("int i = org.apache.hadoop.record.Utils.readVInt("+b+", "+s+");\n"); @@ -50,6 +52,7 @@ void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("}\n"); } + @Override void genCompareBytes(CodeBuffer cb) { cb.append("{\n"); cb.append("int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);\n"); @@ -70,6 +73,7 @@ class CppInt extends CppType { super("int32_t"); } 
+ @Override String getTypeIDObjectString() { return "new ::hadoop::TypeID(::hadoop::RIOTYPE_INT)"; } @@ -82,6 +86,7 @@ public JInt() { setCType(new CType()); } + @Override String getSignature() { return "i"; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JLong.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JLong.java index ca09f053b4..f540fc808d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JLong.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JLong.java @@ -37,15 +37,18 @@ class JavaLong extends JavaType { super("long", "Long", "Long", "TypeID.RIOType.LONG"); } + @Override String getTypeIDObjectString() { return "org.apache.hadoop.record.meta.TypeID.LongTypeID"; } + @Override void genHashCode(CodeBuffer cb, String fname) { cb.append(Consts.RIO_PREFIX + "ret = (int) ("+fname+"^("+ fname+">>>32));\n"); } + @Override void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("{\n"); cb.append("long i = org.apache.hadoop.record.Utils.readVLong("+b+", "+s+");\n"); @@ -54,6 +57,7 @@ void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("}\n"); } + @Override void genCompareBytes(CodeBuffer cb) { cb.append("{\n"); cb.append("long i1 = org.apache.hadoop.record.Utils.readVLong(b1, s1);\n"); @@ -74,6 +78,7 @@ class CppLong extends CppType { super("int64_t"); } + @Override String getTypeIDObjectString() { return "new ::hadoop::TypeID(::hadoop::RIOTYPE_LONG)"; } @@ -86,6 +91,7 @@ public JLong() { setCType(new CType()); } + @Override String getSignature() { return "l"; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JMap.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JMap.java index 4758accb51..03dcad322e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JMap.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JMap.java @@ -59,17 +59,20 @@ class JavaMap extends JavaCompType { this.value = value; } + @Override String getTypeIDObjectString() { return "new org.apache.hadoop.record.meta.MapTypeID(" + key.getTypeIDObjectString() + ", " + value.getTypeIDObjectString() + ")"; } + @Override void genSetRTIFilter(CodeBuffer cb, Map nestedStructMap) { key.genSetRTIFilter(cb, nestedStructMap); value.genSetRTIFilter(cb, nestedStructMap); } + @Override void genCompareTo(CodeBuffer cb, String fname, String other) { String setType = "java.util.Set<"+key.getWrapperType()+"> "; String iterType = "java.util.Iterator<"+key.getWrapperType()+"> "; @@ -98,6 +101,7 @@ void genCompareTo(CodeBuffer cb, String fname, String other) { cb.append("}\n"); } + @Override void genReadMethod(CodeBuffer cb, String fname, String tag, boolean decl) { if (decl) { cb.append(getType()+" "+fname+";\n"); @@ -122,6 +126,7 @@ void genReadMethod(CodeBuffer cb, String fname, String tag, boolean decl) { cb.append("}\n"); } + @Override void genWriteMethod(CodeBuffer cb, String fname, String tag) { String setType = "java.util.Set> "; @@ -153,6 +158,7 @@ void genWriteMethod(CodeBuffer cb, String fname, String tag) { decrLevel(); } + @Override void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("{\n"); incrLevel(); @@ -170,6 +176,7 @@ void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("}\n"); } + 
@Override void genCompareBytes(CodeBuffer cb) { cb.append("{\n"); incrLevel(); @@ -208,12 +215,14 @@ class CppMap extends CppCompType { this.value = value; } + @Override String getTypeIDObjectString() { return "new ::hadoop::MapTypeID(" + key.getTypeIDObjectString() + ", " + value.getTypeIDObjectString() + ")"; } + @Override void genSetRTIFilter(CodeBuffer cb) { key.genSetRTIFilter(cb); value.genSetRTIFilter(cb); @@ -230,6 +239,7 @@ public JMap(JType t1, JType t2) { valueType = t2; } + @Override String getSignature() { return "{" + keyType.getSignature() + valueType.getSignature() +"}"; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JRecord.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JRecord.java index 647d3a7baa..80e545ba3a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JRecord.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JRecord.java @@ -54,11 +54,13 @@ class JavaRecord extends JavaCompType { } } + @Override String getTypeIDObjectString() { return "new org.apache.hadoop.record.meta.StructTypeID(" + fullName + ".getTypeInfo())"; } + @Override void genSetRTIFilter(CodeBuffer cb, Map nestedStructMap) { // ignore, if we'ev already set the type filter for this record if (!nestedStructMap.containsKey(fullName)) { @@ -129,6 +131,7 @@ void genSetupRtiFields(CodeBuffer cb) { cb.append("}\n"); } + @Override void genReadMethod(CodeBuffer cb, String fname, String tag, boolean decl) { if (decl) { cb.append(fullName+" "+fname+";\n"); @@ -137,10 +140,12 @@ void genReadMethod(CodeBuffer cb, String fname, String tag, boolean decl) { cb.append(fname+".deserialize(" + Consts.RECORD_INPUT + ",\""+tag+"\");\n"); } + @Override void genWriteMethod(CodeBuffer cb, String fname, String tag) { cb.append(fname+".serialize(" + Consts.RECORD_OUTPUT + ",\""+tag+"\");\n"); } + @Override void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("{\n"); cb.append("int r = "+fullName+ @@ -149,6 +154,7 @@ void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("}\n"); } + @Override void genCompareBytes(CodeBuffer cb) { cb.append("{\n"); cb.append("int r1 = "+fullName+ @@ -492,6 +498,7 @@ class CppRecord extends CppCompType { } } + @Override String getTypeIDObjectString() { return "new ::hadoop::StructTypeID(" + fullName + "::getTypeInfo().getFieldTypeInfos())"; @@ -501,6 +508,7 @@ String genDecl(String fname) { return " "+name+" "+fname+";\n"; } + @Override void genSetRTIFilter(CodeBuffer cb) { // we set the RTI filter here cb.append(fullName + "::setTypeFilter(rti.getNestedStructTypeInfo(\""+ @@ -797,6 +805,7 @@ public JRecord(String name, ArrayList> flist) { signature = sb.toString(); } + @Override String getSignature() { return signature; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JString.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JString.java index 5c712e963c..cd3ab3dc35 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JString.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JString.java @@ -36,10 +36,12 @@ class JavaString extends JavaCompType { super("String", "String", "String", "TypeID.RIOType.STRING"); } + @Override String getTypeIDObjectString() { return 
"org.apache.hadoop.record.meta.TypeID.StringTypeID"; } + @Override void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("{\n"); cb.append("int i = org.apache.hadoop.record.Utils.readVInt("+b+", "+s+");\n"); @@ -48,6 +50,7 @@ void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("}\n"); } + @Override void genCompareBytes(CodeBuffer cb) { cb.append("{\n"); cb.append("int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);\n"); @@ -61,6 +64,7 @@ void genCompareBytes(CodeBuffer cb) { cb.append("}\n"); } + @Override void genClone(CodeBuffer cb, String fname) { cb.append(Consts.RIO_PREFIX + "other."+fname+" = this."+fname+";\n"); } @@ -72,6 +76,7 @@ class CppString extends CppCompType { super("::std::string"); } + @Override String getTypeIDObjectString() { return "new ::hadoop::TypeID(::hadoop::RIOTYPE_STRING)"; } @@ -84,6 +89,7 @@ public JString() { setCType(new CCompType()); } + @Override String getSignature() { return "s"; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JVector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JVector.java index 8bce5cc91f..46ecbada51 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JVector.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JVector.java @@ -54,15 +54,18 @@ class JavaVector extends JavaCompType { element = t; } + @Override String getTypeIDObjectString() { return "new org.apache.hadoop.record.meta.VectorTypeID(" + element.getTypeIDObjectString() + ")"; } + @Override void genSetRTIFilter(CodeBuffer cb, Map nestedStructMap) { element.genSetRTIFilter(cb, nestedStructMap); } + @Override void genCompareTo(CodeBuffer cb, String fname, String other) { cb.append("{\n"); incrLevel(); @@ -92,6 +95,7 @@ void genCompareTo(CodeBuffer cb, String fname, String other) { cb.append("}\n"); } + @Override void genReadMethod(CodeBuffer cb, String fname, String tag, boolean decl) { if (decl) { cb.append(getType()+" "+fname+";\n"); @@ -113,6 +117,7 @@ void genReadMethod(CodeBuffer cb, String fname, String tag, boolean decl) { cb.append("}\n"); } + @Override void genWriteMethod(CodeBuffer cb, String fname, String tag) { cb.append("{\n"); incrLevel(); @@ -131,6 +136,7 @@ void genWriteMethod(CodeBuffer cb, String fname, String tag) { decrLevel(); } + @Override void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("{\n"); incrLevel(); @@ -146,6 +152,7 @@ void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("}\n"); } + @Override void genCompareBytes(CodeBuffer cb) { cb.append("{\n"); incrLevel(); @@ -179,11 +186,13 @@ class CppVector extends CppCompType { element = t; } + @Override String getTypeIDObjectString() { return "new ::hadoop::VectorTypeID(" + element.getTypeIDObjectString() + ")"; } + @Override void genSetRTIFilter(CodeBuffer cb) { element.genSetRTIFilter(cb); } @@ -198,6 +207,7 @@ public JVector(JType t) { setCType(new CCompType()); } + @Override String getSignature() { return "[" + type.getSignature() + "]"; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JavaGenerator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JavaGenerator.java index 04c4bd8473..6d51df6cd1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JavaGenerator.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JavaGenerator.java @@ -39,6 +39,7 @@ class JavaGenerator extends CodeGenerator { * @param rlist List of records defined within this file * @param destDir output directory */ + @Override void genCode(String name, ArrayList ilist, ArrayList rlist, String destDir, ArrayList options) throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/ant/RccTask.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/ant/RccTask.java index 5f999ecb88..869e0594f7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/ant/RccTask.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/ant/RccTask.java @@ -110,6 +110,7 @@ public void addFileset(FileSet set) { /** * Invoke the Hadoop record compiler on each record definition file */ + @Override public void execute() throws BuildException { if (src == null && filesets.size()==0) { throw new BuildException("There must be a file attribute or a fileset child element"); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/ParseException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/ParseException.java index 3d4a82bac6..3af5910ccb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/ParseException.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/ParseException.java @@ -120,6 +120,7 @@ public ParseException(String message) { * of the final stack trace, and hence the correct error message * gets displayed. 
*/ + @Override public String getMessage() { if (!specialConstructor) { return super.getMessage(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/Rcc.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/Rcc.java index fcac0997d6..c4c74cd651 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/Rcc.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/Rcc.java @@ -24,7 +24,6 @@ import org.apache.hadoop.record.compiler.*; import java.util.ArrayList; import java.util.Hashtable; -import java.util.Iterator; import java.io.File; import java.io.FileReader; import java.io.FileNotFoundException; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/RccTokenManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/RccTokenManager.java index 72acd13f74..7488606fe9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/RccTokenManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/RccTokenManager.java @@ -20,14 +20,6 @@ package org.apache.hadoop.record.compiler.generated; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.record.compiler.*; -import java.util.ArrayList; -import java.util.Hashtable; -import java.util.Iterator; -import java.io.File; -import java.io.FileReader; -import java.io.FileNotFoundException; -import java.io.IOException; /** * @deprecated Replaced by Avro. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/Token.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/Token.java index 37df5b97e0..1396bf899b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/Token.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/Token.java @@ -78,6 +78,7 @@ public class Token { /** * Returns the image. */ + @Override public String toString() { return image; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/TokenMgrError.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/TokenMgrError.java index 4b0712e82f..b6da7dadcd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/TokenMgrError.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/TokenMgrError.java @@ -138,6 +138,7 @@ protected static String LexicalError(boolean EOFSeen, int lexState, int errorLin * * from this method for such cases in the release version of your parser. 
*/ + @Override public String getMessage() { return super.getMessage(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/FieldTypeInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/FieldTypeInfo.java index f7f4fb0d02..32436abf82 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/FieldTypeInfo.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/FieldTypeInfo.java @@ -69,6 +69,7 @@ void write(RecordOutput rout, String tag) throws IOException { /** * Two FieldTypeInfos are equal if ach of their fields matches */ + @Override public boolean equals(Object o) { if (this == o) return true; @@ -87,6 +88,7 @@ public boolean equals(Object o) { * We use a basic hashcode implementation, since this class will likely not * be used as a hashmap key */ + @Override public int hashCode() { return 37*17+typeID.hashCode() + 37*17+fieldID.hashCode(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/MapTypeID.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/MapTypeID.java index 3a83d0896c..f9c5320cfb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/MapTypeID.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/MapTypeID.java @@ -19,8 +19,6 @@ package org.apache.hadoop.record.meta; import java.io.IOException; -import java.util.*; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.record.RecordOutput; @@ -58,6 +56,7 @@ public TypeID getValueTypeID() { return this.typeIDValue; } + @Override void write(RecordOutput rout, String tag) throws IOException { rout.writeByte(typeVal, tag); typeIDKey.write(rout, tag); @@ -68,6 +67,7 @@ void write(RecordOutput rout, String tag) throws IOException { * Two map typeIDs are equal if their constituent elements have the * same type */ + @Override public boolean equals(Object o) { if (!super.equals(o)) return false; @@ -82,6 +82,7 @@ public boolean equals(Object o) { * We use a basic hashcode implementation, since this class will likely not * be used as a hashmap key */ + @Override public int hashCode() { return 37*17+typeIDKey.hashCode() + 37*17+typeIDValue.hashCode(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/RecordTypeInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/RecordTypeInfo.java index 3bd153cdc3..8a9d0b5fbb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/RecordTypeInfo.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/RecordTypeInfo.java @@ -122,6 +122,7 @@ public RecordTypeInfo getNestedStructTypeInfo(String name) { /** * Serialize the type information for a record */ + @Override public void serialize(RecordOutput rout, String tag) throws IOException { // write out any header, version info, here rout.startRecord(this, tag); @@ -133,6 +134,7 @@ public void serialize(RecordOutput rout, String tag) throws IOException { /** * Deserialize the type information for a record */ + @Override public void deserialize(RecordInput rin, String tag) throws IOException { // read in any header, version info rin.startRecord(tag); @@ -148,6 +150,7 @@ public void deserialize(RecordInput rin, 
String tag) throws IOException { * So we always throw an exception. * Not implemented. Always returns 0 if another RecordTypeInfo is passed in. */ + @Override public int compareTo (final Object peer_) throws ClassCastException { if (!(peer_ instanceof RecordTypeInfo)) { throw new ClassCastException("Comparing different types of records."); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/StructTypeID.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/StructTypeID.java index b7d19ea815..d2c9ccdc75 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/StructTypeID.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/StructTypeID.java @@ -72,6 +72,7 @@ StructTypeID findStruct(String name) { return null; } + @Override void write(RecordOutput rout, String tag) throws IOException { rout.writeByte(typeVal, tag); writeRest(rout, tag); @@ -155,9 +156,11 @@ private TypeID genericReadTypeID(RecordInput rin, String tag) throws IOException } } + @Override public boolean equals(Object o) { return super.equals(o); } + @Override public int hashCode() { return super.hashCode(); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/TypeID.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/TypeID.java index ea2e35eb79..5a76eb4bd1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/TypeID.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/TypeID.java @@ -89,6 +89,7 @@ void write(RecordOutput rout, String tag) throws IOException { /** * Two base typeIDs are equal if they refer to the same type */ + @Override public boolean equals(Object o) { if (this == o) return true; @@ -107,6 +108,7 @@ public boolean equals(Object o) { * We use a basic hashcode implementation, since this class will likely not * be used as a hashmap key */ + @Override public int hashCode() { // See 'Effectve Java' by Joshua Bloch return 37*17+(int)typeVal; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/VectorTypeID.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/VectorTypeID.java index 88f820b8b8..22ab07efdc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/VectorTypeID.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/VectorTypeID.java @@ -43,6 +43,7 @@ public TypeID getElementTypeID() { return this.typeIDElement; } + @Override void write(RecordOutput rout, String tag) throws IOException { rout.writeByte(typeVal, tag); typeIDElement.write(rout, tag); @@ -52,6 +53,7 @@ void write(RecordOutput rout, String tag) throws IOException { * Two vector typeIDs are equal if their constituent elements have the * same type */ + @Override public boolean equals(Object o) { if (!super.equals (o)) return false; @@ -64,6 +66,7 @@ public boolean equals(Object o) { * We use a basic hashcode implementation, since this class will likely not * be used as a hashmap key */ + @Override public int hashCode() { return 37*17+typeIDElement.hashCode(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RefreshUserMappingsProtocol.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RefreshUserMappingsProtocol.java index 
0e1c0864f5..b72e3ed6df 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RefreshUserMappingsProtocol.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RefreshUserMappingsProtocol.java @@ -22,7 +22,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.ipc.VersionedProtocol; import org.apache.hadoop.security.KerberosInfo; /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslInputStream.java index fa82664bdd..7ee452316a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslInputStream.java @@ -189,6 +189,7 @@ public SaslInputStream(InputStream inStream, SaslClient saslClient) { * @exception IOException * if an I/O error occurs. */ + @Override public int read() throws IOException { if (!useWrap) { return inStream.read(); @@ -220,6 +221,7 @@ public int read() throws IOException { * @exception IOException * if an I/O error occurs. */ + @Override public int read(byte[] b) throws IOException { return read(b, 0, b.length); } @@ -242,6 +244,7 @@ public int read(byte[] b) throws IOException { * @exception IOException * if an I/O error occurs. */ + @Override public int read(byte[] b, int off, int len) throws IOException { if (!useWrap) { return inStream.read(b, off, len); @@ -286,6 +289,7 @@ public int read(byte[] b, int off, int len) throws IOException { * @exception IOException * if an I/O error occurs. */ + @Override public long skip(long n) throws IOException { if (!useWrap) { return inStream.skip(n); @@ -312,6 +316,7 @@ public long skip(long n) throws IOException { * @exception IOException * if an I/O error occurs. */ + @Override public int available() throws IOException { if (!useWrap) { return inStream.available(); @@ -329,6 +334,7 @@ public int available() throws IOException { * @exception IOException * if an I/O error occurs. */ + @Override public void close() throws IOException { disposeSasl(); ostart = 0; @@ -344,6 +350,7 @@ public void close() throws IOException { * @return false, since this class does not support the * mark and reset methods. */ + @Override public boolean markSupported() { return false; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslOutputStream.java index 4a0f3cb42c..494ba1e7a4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslOutputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslOutputStream.java @@ -19,9 +19,7 @@ package org.apache.hadoop.security; import java.io.BufferedOutputStream; -import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; -import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; import java.io.OutputStream; @@ -122,6 +120,7 @@ private void disposeSasl() throws SaslException { * @exception IOException * if an I/O error occurs. 
*/ + @Override public void write(int b) throws IOException { if (!useWrap) { outStream.write(b); @@ -146,6 +145,7 @@ public void write(int b) throws IOException { * @exception IOException * if an I/O error occurs. */ + @Override public void write(byte[] b) throws IOException { write(b, 0, b.length); } @@ -163,6 +163,7 @@ public void write(byte[] b) throws IOException { * @exception IOException * if an I/O error occurs. */ + @Override public void write(byte[] inBuf, int off, int len) throws IOException { if (!useWrap) { outStream.write(inBuf, off, len); @@ -197,6 +198,7 @@ public void write(byte[] inBuf, int off, int len) throws IOException { * @exception IOException * if an I/O error occurs. */ + @Override public void flush() throws IOException { outStream.flush(); } @@ -208,6 +210,7 @@ public void flush() throws IOException { * @exception IOException * if an I/O error occurs. */ + @Override public void close() throws IOException { disposeSasl(); outStream.close(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java index 54b1502acc..98b3f5db29 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java @@ -239,6 +239,7 @@ public SaslClientCallbackHandler(Token token) { this.userPassword = SaslRpcServer.encodePassword(token.getPassword()); } + @Override public void handle(Callback[] callbacks) throws UnsupportedCallbackException { NameCallback nc = null; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java index b0588c27fd..31718628f2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java @@ -194,7 +194,6 @@ private char[] getPassword(TokenIdentifier tokenid) throws InvalidToken { return encodePassword(secretManager.retrievePassword(tokenid)); } - /** {@inheritDoc} */ @Override public void handle(Callback[] callbacks) throws InvalidToken, UnsupportedCallbackException { @@ -253,7 +252,6 @@ public void handle(Callback[] callbacks) throws InvalidToken, @InterfaceStability.Evolving public static class SaslGssCallbackHandler implements CallbackHandler { - /** {@inheritDoc} */ @Override public void handle(Callback[] callbacks) throws UnsupportedCallbackException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java index 2f65892db7..25bae83b1e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java @@ -498,6 +498,7 @@ interface HostResolver { * Uses standard java host resolution */ static class StandardHostResolver implements HostResolver { + @Override public InetAddress getByName(String host) throws UnknownHostException { return InetAddress.getByName(host); } @@ -542,6 +543,7 @@ protected static class QualifiedHostResolver implements HostResolver { * @return InetAddress with the fully 
qualified hostname or ip * @throws UnknownHostException if host does not exist */ + @Override public InetAddress getByName(String host) throws UnknownHostException { InetAddress addr = null; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java index 1b14927bd7..6335fc7146 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java @@ -20,10 +20,7 @@ import java.io.IOException; import java.util.LinkedList; import java.util.List; -import java.util.Map; import java.util.StringTokenizer; -import java.util.concurrent.ConcurrentHashMap; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsNetgroupMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsNetgroupMapping.java index 7e60bed26a..0ee1c60c59 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsNetgroupMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsNetgroupMapping.java @@ -20,12 +20,6 @@ import java.io.IOException; import java.util.LinkedList; import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.HashSet; -import java.util.StringTokenizer; -import java.util.concurrent.ConcurrentHashMap; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index 0d3c482289..184b40d8ed 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -19,7 +19,6 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION; -import java.io.File; import java.io.IOException; import java.lang.reflect.UndeclaredThrowableException; import java.security.AccessControlContext; @@ -33,7 +32,6 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; @@ -344,6 +342,7 @@ private static class RealUser implements Principal { this.realUser = realUser; } + @Override public String getName() { return realUser.getUserName(); } @@ -700,6 +699,7 @@ private void spawnAutoRenewalThreadForUserCreds() { !isKeytab) { Thread t = new Thread(new Runnable() { + @Override public void run() { String cmd = conf.get("hadoop.kerberos.kinit.command", "kinit"); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java index 922d330842..e23612ec0f 100644 --- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java @@ -48,6 +48,7 @@ public class AccessControlList implements Writable { WritableFactories.setFactory (AccessControlList.class, new WritableFactory() { + @Override public Writable newInstance() { return new AccessControlList(); } }); } @@ -318,6 +319,7 @@ public String getAclString() { /** * Serializes the AccessControlList object */ + @Override public void write(DataOutput out) throws IOException { String aclString = getAclString(); Text.writeString(out, aclString); @@ -326,6 +328,7 @@ public void write(DataOutput out) throws IOException { /** * Deserializes the AccessControlList object */ + @Override public void readFields(DataInput in) throws IOException { String aclString = Text.readString(in); buildACL(aclString); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/PolicyProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/PolicyProvider.java index c2176e5989..6b86a05e7a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/PolicyProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/PolicyProvider.java @@ -42,6 +42,7 @@ public abstract class PolicyProvider { */ public static final PolicyProvider DEFAULT_POLICY_PROVIDER = new PolicyProvider() { + @Override public Service[] getServices() { return null; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/RefreshAuthorizationPolicyProtocol.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/RefreshAuthorizationPolicyProtocol.java index 9ab6d68daf..4407a7e8e3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/RefreshAuthorizationPolicyProtocol.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/RefreshAuthorizationPolicyProtocol.java @@ -22,7 +22,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.ipc.VersionedProtocol; import org.apache.hadoop.security.KerberosInfo; /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java index 00dd2021ee..4c17f9fd25 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java @@ -124,6 +124,7 @@ public Configuration getConf() { * @throws GeneralSecurityException thrown if the keystores could not be * initialized due to a security error. 
*/ + @Override public void init(SSLFactory.Mode mode) throws IOException, GeneralSecurityException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLHostnameVerifier.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLHostnameVerifier.java index 3f88fb89a7..c59000ea6a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLHostnameVerifier.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLHostnameVerifier.java @@ -81,6 +81,7 @@ @InterfaceStability.Evolving public interface SSLHostnameVerifier extends javax.net.ssl.HostnameVerifier { + @Override boolean verify(String host, SSLSession session); void check(String host, SSLSocket ssl) throws IOException; @@ -125,12 +126,14 @@ void check(String[] hosts, String[] cns, String[] subjectAlts) */ public final static SSLHostnameVerifier DEFAULT = new AbstractVerifier() { + @Override public final void check(final String[] hosts, final String[] cns, final String[] subjectAlts) throws SSLException { check(hosts, cns, subjectAlts, false, false); } + @Override public final String toString() { return "DEFAULT"; } }; @@ -143,6 +146,7 @@ public final void check(final String[] hosts, final String[] cns, */ public final static SSLHostnameVerifier DEFAULT_AND_LOCALHOST = new AbstractVerifier() { + @Override public final void check(final String[] hosts, final String[] cns, final String[] subjectAlts) throws SSLException { @@ -152,6 +156,7 @@ public final void check(final String[] hosts, final String[] cns, check(hosts, cns, subjectAlts, false, false); } + @Override public final String toString() { return "DEFAULT_AND_LOCALHOST"; } }; @@ -173,12 +178,14 @@ public final void check(final String[] hosts, final String[] cns, */ public final static SSLHostnameVerifier STRICT = new AbstractVerifier() { + @Override public final void check(final String[] host, final String[] cns, final String[] subjectAlts) throws SSLException { check(host, cns, subjectAlts, false, true); } + @Override public final String toString() { return "STRICT"; } }; @@ -190,12 +197,14 @@ public final void check(final String[] host, final String[] cns, */ public final static SSLHostnameVerifier STRICT_IE6 = new AbstractVerifier() { + @Override public final void check(final String[] host, final String[] cns, final String[] subjectAlts) throws SSLException { check(host, cns, subjectAlts, true, true); } + @Override public final String toString() { return "STRICT_IE6"; } }; @@ -205,11 +214,13 @@ public final void check(final String[] host, final String[] cns, */ public final static SSLHostnameVerifier ALLOW_ALL = new AbstractVerifier() { + @Override public final void check(final String[] host, final String[] cns, final String[] subjectAlts) { // Allow everything - so never blowup. } + @Override public final String toString() { return "ALLOW_ALL"; } }; @@ -250,6 +261,7 @@ protected AbstractVerifier() {} * @param session SSLSession with the remote server * @return true if the host matched the one in the certificate. 
*/ + @Override public boolean verify(String host, SSLSession session) { try { Certificate[] certs = session.getPeerCertificates(); @@ -262,20 +274,24 @@ public boolean verify(String host, SSLSession session) { } } + @Override public void check(String host, SSLSocket ssl) throws IOException { check(new String[]{host}, ssl); } + @Override public void check(String host, X509Certificate cert) throws SSLException { check(new String[]{host}, cert); } + @Override public void check(String host, String[] cns, String[] subjectAlts) throws SSLException { check(new String[]{host}, cns, subjectAlts); } + @Override public void check(String host[], SSLSocket ssl) throws IOException { if (host == null) { @@ -332,6 +348,7 @@ is presenting (e.g. edit "/etc/apache2/server.crt" check(host, x509); } + @Override public void check(String[] host, X509Certificate cert) throws SSLException { String[] cns = Certificates.getCNs(cert); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java index bbddf6fdc7..905c948da7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java @@ -195,7 +195,7 @@ public void setService(Text newService) { service = newService; } - /** {@inheritDoc} */ + @Override public void readFields(DataInput in) throws IOException { int len = WritableUtils.readVInt(in); if (identifier == null || identifier.length != len) { @@ -211,7 +211,7 @@ public void readFields(DataInput in) throws IOException { service.readFields(in); } - /** {@inheritDoc} */ + @Override public void write(DataOutput out) throws IOException { WritableUtils.writeVInt(out, identifier.length); out.write(identifier); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java index b3e367bdf2..6ec3b7e606 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java @@ -85,6 +85,7 @@ public AbstractDelegationTokenIdentifier(Text owner, Text renewer, Text realUser * * @return the username or owner */ + @Override public UserGroupInformation getUser() { if ( (owner == null) || ("".equals(owner.toString()))) { return null; @@ -150,7 +151,7 @@ static boolean isEqual(Object a, Object b) { return a == null ? 
b == null : a.equals(b); } - /** {@inheritDoc} */ + @Override public boolean equals(Object obj) { if (obj == this) { return true; @@ -168,11 +169,12 @@ && isEqual(this.renewer, that.renewer) return false; } - /** {@inheritDoc} */ + @Override public int hashCode() { return this.sequenceNumber; } + @Override public void readFields(DataInput in) throws IOException { byte version = in.readByte(); if (version != VERSION) { @@ -200,6 +202,7 @@ void writeImpl(DataOutput out) throws IOException { WritableUtils.writeVInt(out, masterKeyId); } + @Override public void write(DataOutput out) throws IOException { if (owner.getLength() > Text.DEFAULT_MAX_LEN) { throw new IOException("owner is too long to be serialized!"); @@ -213,6 +216,7 @@ public void write(DataOutput out) throws IOException { writeImpl(out); } + @Override public String toString() { StringBuilder buffer = new StringBuilder(); buffer diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java index 97530d10d0..29367a38ab 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java @@ -404,6 +404,7 @@ private class ExpiredTokenRemover extends Thread { private long lastMasterKeyUpdate; private long lastTokenCacheCleanup; + @Override public void run() { LOG.info("Starting expired delegation token remover thread, " + "tokenRemoverScanInterval=" + tokenRemoverScanInterval diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/DelegationKey.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/DelegationKey.java index 3b5705eb6d..3458b2df82 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/DelegationKey.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/DelegationKey.java @@ -91,6 +91,7 @@ public void setExpiryDate(long expiryDate) { /** */ + @Override public void write(DataOutput out) throws IOException { WritableUtils.writeVInt(out, keyId); WritableUtils.writeVLong(out, expiryDate); @@ -104,6 +105,7 @@ public void write(DataOutput out) throws IOException { /** */ + @Override public void readFields(DataInput in) throws IOException { keyId = WritableUtils.readVInt(in); expiryDate = WritableUtils.readVLong(in); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetUserMappingsProtocol.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetUserMappingsProtocol.java index 0f5bf7a513..c0c107933f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetUserMappingsProtocol.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetUserMappingsProtocol.java @@ -21,7 +21,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.ipc.VersionedProtocol; /** * Protocol implemented by the Name Node and Job Tracker which maps users to diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java index 4711ed2f56..a1e20d242d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java @@ -71,6 +71,7 @@ public class AsyncDiskService { public AsyncDiskService(String[] volumes) throws IOException { threadFactory = new ThreadFactory() { + @Override public Thread newThread(Runnable r) { return new Thread(threadGroup, r); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java index 4813847e84..7d321e8a29 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java @@ -240,19 +240,23 @@ static public int getChecksumHeaderSize() { return 1 + SIZE_OF_INTEGER; // type byte, bytesPerChecksum int } //Checksum Interface. Just a wrapper around member summer. + @Override public long getValue() { return summer.getValue(); } + @Override public void reset() { summer.reset(); inSum = 0; } + @Override public void update( byte[] b, int off, int len ) { if ( len > 0 ) { summer.update( b, off, len ); inSum += len; } } + @Override public void update( int b ) { summer.update( b ); inSum += 1; @@ -444,9 +448,13 @@ static class ChecksumNull implements Checksum { public ChecksumNull() {} //Dummy interface + @Override public long getValue() { return 0; } + @Override public void reset() {} + @Override public void update(byte[] b, int off, int len) {} + @Override public void update(int b) {} }; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HeapSort.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HeapSort.java index b9d2fc17ca..5a0fb27fe4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HeapSort.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HeapSort.java @@ -48,13 +48,12 @@ private static void downHeap(final IndexedSortable s, final int b, * Sort the given range of items using heap sort. 
* {@inheritDoc} */ + @Override public void sort(IndexedSortable s, int p, int r) { sort(s, p, r, null); } - /** - * {@inheritDoc} - */ + @Override public void sort(final IndexedSortable s, final int p, final int r, final Progressable rep) { final int N = r - p; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progress.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progress.java index 536b6f27ab..9064357747 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progress.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progress.java @@ -214,6 +214,7 @@ public synchronized void setStatus(String status) { this.status = status; } + @Override public String toString() { StringBuilder result = new StringBuilder(); toString(result); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PureJavaCrc32.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PureJavaCrc32.java index a7a2d37c84..3dd30fe6b0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PureJavaCrc32.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PureJavaCrc32.java @@ -46,17 +46,17 @@ public PureJavaCrc32() { reset(); } - /** {@inheritDoc} */ + @Override public long getValue() { return (~crc) & 0xffffffffL; } - /** {@inheritDoc} */ + @Override public void reset() { crc = 0xffffffff; } - /** {@inheritDoc} */ + @Override public void update(byte[] b, int off, int len) { int localCrc = crc; while(len > 7) { @@ -81,7 +81,7 @@ public void update(byte[] b, int off, int len) { crc = localCrc; } - /** {@inheritDoc} */ + @Override final public void update(int b) { crc = (crc >>> 8) ^ T8_0[(crc ^ b) & 0xff]; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PureJavaCrc32C.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PureJavaCrc32C.java index 3d52eae077..7fdfe1489f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PureJavaCrc32C.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PureJavaCrc32C.java @@ -42,18 +42,18 @@ public PureJavaCrc32C() { reset(); } - /** {@inheritDoc} */ + @Override public long getValue() { long ret = crc; return (~ret) & 0xffffffffL; } - /** {@inheritDoc} */ + @Override public void reset() { crc = 0xffffffff; } - /** {@inheritDoc} */ + @Override public void update(byte[] b, int off, int len) { int localCrc = crc; while(len > 7) { @@ -78,7 +78,7 @@ public void update(byte[] b, int off, int len) { crc = localCrc; } - /** {@inheritDoc} */ + @Override final public void update(int b) { crc = (crc >>> 8) ^ T8_0[(crc ^ b) & 0xff]; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/QuickSort.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/QuickSort.java index 5686f82d05..73d8d90d42 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/QuickSort.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/QuickSort.java @@ -52,13 +52,12 @@ protected static int getMaxDepth(int x) { * {@inheritDoc} If the recursion depth falls below {@link #getMaxDepth}, * then switch to {@link HeapSort}. 
*/ + @Override public void sort(IndexedSortable s, int p, int r) { sort(s, p, r, null); } - /** - * {@inheritDoc} - */ + @Override public void sort(final IndexedSortable s, int p, int r, final Progressable rep) { sortInternal(s, p, r, rep, getMaxDepth(r - p)); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java index 4520cb264a..bf12de633f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java @@ -257,6 +257,7 @@ void moveData() { */ private static ThreadLocal cloneBuffers = new ThreadLocal() { + @Override protected synchronized CopyInCopyOutBuffer initialValue() { return new CopyInCopyOutBuffer(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java index d563c1d7d5..b8c16f214d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java @@ -30,7 +30,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; /** * A base class for running a Unix command. @@ -323,10 +322,12 @@ public void execute() throws IOException { this.run(); } + @Override public String[] getExecString() { return command; } + @Override protected void parseExecResult(BufferedReader lines) throws IOException { output = new StringBuffer(); char[] buf = new char[512]; @@ -348,6 +349,7 @@ public String getOutput() { * * @return a string representation of the object. 
*/ + @Override public String toString() { StringBuilder builder = new StringBuilder(); String[] args = getExecString(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/Filter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/Filter.java index e39463fc84..f183a4c53c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/Filter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/Filter.java @@ -193,6 +193,7 @@ public void add(Key[] keys){ // Writable interface + @Override public void write(DataOutput out) throws IOException { out.writeInt(VERSION); out.writeInt(this.nbHash); @@ -200,6 +201,7 @@ public void write(DataOutput out) throws IOException { out.writeInt(this.vectorSize); } + @Override public void readFields(DataInput in) throws IOException { int ver = in.readInt(); if (ver > 0) { // old unversioned format diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/Key.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/Key.java index 1ff5b82600..7ac134c76c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/Key.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/Key.java @@ -154,12 +154,14 @@ public int hashCode() { // Writable + @Override public void write(DataOutput out) throws IOException { out.writeInt(bytes.length); out.write(bytes); out.writeDouble(weight); } + @Override public void readFields(DataInput in) throws IOException { this.bytes = new byte[in.readInt()]; in.readFully(this.bytes); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/JenkinsHash.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/JenkinsHash.java index 8e867c4cbb..bf4891378c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/JenkinsHash.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/JenkinsHash.java @@ -81,6 +81,7 @@ private static long rot(long val, int pos) { *
Use for hash table lookup, or anything where one collision in 2^^32 is * acceptable. Do NOT use for cryptographic purposes. */ + @Override @SuppressWarnings("fallthrough") public int hash(byte[] key, int nbytes, int initval) { int length = nbytes; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/MurmurHash.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/MurmurHash.java index 8e79f1aa5a..6ed3dfd3df 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/MurmurHash.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/MurmurHash.java @@ -37,6 +37,7 @@ public static Hash getInstance() { return _instance; } + @Override public int hash(byte[] data, int length, int seed) { int m = 0x5bd1e995; int r = 24; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLICommand.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLICommand.java index 202b2429cb..50cb3a53c5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLICommand.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLICommand.java @@ -24,5 +24,6 @@ public interface CLICommand { public CommandExecutor getExecutor(String tag) throws IllegalArgumentException; public CLICommandTypes getType(); public String getCmd(); + @Override public String toString(); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLITestCmd.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLITestCmd.java index 55e99b51a6..602a07f3d5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLITestCmd.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLITestCmd.java @@ -31,6 +31,7 @@ public CLITestCmd(String str, CLICommandTypes type) { this.type = type; } + @Override public CommandExecutor getExecutor(String tag) throws IllegalArgumentException { if (getType() instanceof CLICommandFS) return new FSCmdExecutor(tag, new FsShell()); @@ -38,12 +39,17 @@ public CommandExecutor getExecutor(String tag) throws IllegalArgumentException { IllegalArgumentException("Unknown type of test command: " + getType()); } + @Override public CLICommandTypes getType() { return type; } + + @Override public String getCmd() { return cmd; } + + @Override public String toString() { return cmd; } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/FSCmdExecutor.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/FSCmdExecutor.java index 86e86b6e1f..98237ac726 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/FSCmdExecutor.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/FSCmdExecutor.java @@ -29,6 +29,7 @@ public FSCmdExecutor(String namenode, FsShell shell) { this.shell = shell; } + @Override protected void execute(final String cmd) throws Exception{ String[] args = getCommandAsArgs(cmd, "NAMENODE", this.namenode); ToolRunner.run(shell, args); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java index 1928de44a4..1c22ee68c7 100644 --- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java @@ -24,7 +24,6 @@ import javax.xml.parsers.DocumentBuilderFactory; import org.mortbay.util.ajax.JSON; -import org.mortbay.util.ajax.JSON.Output; import org.w3c.dom.Document; import org.w3c.dom.Element; import org.w3c.dom.Node; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java index 679ced34ee..27842be427 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java @@ -39,8 +39,6 @@ import junit.framework.TestCase; import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertNotNull; - import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration.IntegerRanges; import org.apache.hadoop.fs.Path; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java index df346dd657..014844e28b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java @@ -19,8 +19,6 @@ package org.apache.hadoop.conf; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java index b8f820c024..3036d0c839 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java @@ -18,9 +18,6 @@ package org.apache.hadoop.conf; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - import java.io.ByteArrayOutputStream; import java.util.Map; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java index 2cfb56a416..f4367523cb 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java @@ -99,17 +99,11 @@ public ReconfigurableDummy(Configuration conf) { super(conf); } - /** - * {@inheritDoc} - */ @Override public Collection getReconfigurableProperties() { return Arrays.asList(PROP1, PROP2, PROP4); } - /** - * {@inheritDoc} - */ @Override public synchronized void reconfigurePropertyImpl(String property, String newVal) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java index 6c50100901..e9677badc3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java @@ -67,6 +67,7 @@ public abstract class FSMainOperationsBaseTest { protected static FileSystem fSys; final private static PathFilter DEFAULT_FILTER = new PathFilter() { + @Override public boolean accept(final Path file) { return true; } @@ -74,6 +75,7 @@ public boolean accept(final Path file) { //A test filter with returns any path containing a "b" final private static PathFilter TEST_X_FILTER = new PathFilter() { + @Override public boolean accept(Path file) { if(file.getName().contains("x") || file.getName().contains("X")) return true; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java index 150b68e35d..bf60e02cd6 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java @@ -67,6 +67,7 @@ public abstract class FileContextMainOperationsBaseTest { protected static FileContext fc; final private static PathFilter DEFAULT_FILTER = new PathFilter() { + @Override public boolean accept(final Path file) { return true; } @@ -74,6 +75,7 @@ public boolean accept(final Path file) { //A test filter with returns any path containing a "b" final private static PathFilter TEST_X_FILTER = new PathFilter() { + @Override public boolean accept(Path file) { if(file.getName().contains("x") || file.getName().contains("X")) return true; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java index 39ae24659b..b80764cebf 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java @@ -176,6 +176,7 @@ public void testUgi() throws IOException, InterruptedException { .createRemoteUser("otherUser"); FileContext newFc = otherUser.doAs(new PrivilegedExceptionAction() { + @Override public FileContext run() throws Exception { FileContext newFc = FileContext.getFileContext(); return newFc; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextURIBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextURIBase.java index 5786a6653c..0acd416dd8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextURIBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextURIBase.java @@ -20,8 +20,6 @@ import java.io.*; import java.util.ArrayList; -import java.util.Iterator; - import junit.framework.Assert; import org.apache.hadoop.fs.permission.FsPermission; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAvroFSInput.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAvroFSInput.java index 
5d2c595f5c..3e5970d228 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAvroFSInput.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAvroFSInput.java @@ -19,7 +19,6 @@ package org.apache.hadoop.fs; import java.io.BufferedWriter; -import java.io.IOException; import java.io.OutputStreamWriter; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java index ffb1dcf1f1..de3d5566eb 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java @@ -29,11 +29,13 @@ public class TestDU extends TestCase { final static private File DU_DIR = new File( System.getProperty("test.build.data","/tmp"), "dutmp"); + @Override public void setUp() { FileUtil.fullyDelete(DU_DIR); assertTrue(DU_DIR.mkdirs()); } + @Override public void tearDown() throws IOException { FileUtil.fullyDelete(DU_DIR); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFSMainOperationsLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFSMainOperationsLocalFileSystem.java index 128c1fb088..e0ee5f03f2 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFSMainOperationsLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFSMainOperationsLocalFileSystem.java @@ -28,6 +28,7 @@ public class TestFSMainOperationsLocalFileSystem extends FSMainOperationsBaseTest { + @Override @Before public void setUp() throws Exception { fSys = FileSystem.getLocal(new Configuration()); @@ -35,12 +36,14 @@ public void setUp() throws Exception { } static Path wd = null; + @Override protected Path getDefaultWorkingDirectory() throws IOException { if (wd == null) wd = FileSystem.getLocal(new Configuration()).getWorkingDirectory(); return wd; } + @Override @After public void tearDown() throws Exception { super.tearDown(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFcLocalFsPermission.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFcLocalFsPermission.java index 335f403fe7..439ce2c15c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFcLocalFsPermission.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFcLocalFsPermission.java @@ -26,12 +26,14 @@ public class TestFcLocalFsPermission extends FileContextPermissionBase { + @Override @Before public void setUp() throws Exception { fc = FileContext.getLocalFSFileContext(); super.setUp(); } + @Override @After public void tearDown() throws Exception { super.tearDown(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFcLocalFsUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFcLocalFsUtil.java index bc1126f231..29b6463806 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFcLocalFsUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFcLocalFsUtil.java @@ -25,6 +25,7 @@ public class TestFcLocalFsUtil extends FileContextUtilBase { + @Override @Before public void setUp() throws Exception { fc = 
FileContext.getLocalFSFileContext(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java index 7e5f99f5fb..8dff124d7e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java @@ -110,6 +110,7 @@ public void testDefaultFsUris() throws Exception { public static class InitializeForeverFileSystem extends LocalFileSystem { final static Semaphore sem = new Semaphore(0); + @Override public void initialize(URI uri, Configuration conf) throws IOException { // notify that InitializeForeverFileSystem started initialization sem.release(); @@ -127,6 +128,7 @@ public void initialize(URI uri, Configuration conf) throws IOException { public void testCacheEnabledWithInitializeForeverFS() throws Exception { final Configuration conf = new Configuration(); Thread t = new Thread() { + @Override public void run() { conf.set("fs.localfs1.impl", "org.apache.hadoop.fs." + "TestFileSystemCaching$InitializeForeverFileSystem"); @@ -167,11 +169,13 @@ public void testCacheForUgi() throws Exception { UserGroupInformation ugiA = UserGroupInformation.createRemoteUser("foo"); UserGroupInformation ugiB = UserGroupInformation.createRemoteUser("bar"); FileSystem fsA = ugiA.doAs(new PrivilegedExceptionAction() { + @Override public FileSystem run() throws Exception { return FileSystem.get(new URI("cachedfile://a"), conf); } }); FileSystem fsA1 = ugiA.doAs(new PrivilegedExceptionAction() { + @Override public FileSystem run() throws Exception { return FileSystem.get(new URI("cachedfile://a"), conf); } @@ -180,6 +184,7 @@ public FileSystem run() throws Exception { assertSame(fsA, fsA1); FileSystem fsB = ugiB.doAs(new PrivilegedExceptionAction() { + @Override public FileSystem run() throws Exception { return FileSystem.get(new URI("cachedfile://a"), conf); } @@ -192,6 +197,7 @@ public FileSystem run() throws Exception { UserGroupInformation ugiA2 = UserGroupInformation.createRemoteUser("foo"); fsA = ugiA2.doAs(new PrivilegedExceptionAction() { + @Override public FileSystem run() throws Exception { return FileSystem.get(new URI("cachedfile://a"), conf); } @@ -203,6 +209,7 @@ public FileSystem run() throws Exception { ugiA.addToken(t1); fsA = ugiA.doAs(new PrivilegedExceptionAction() { + @Override public FileSystem run() throws Exception { return FileSystem.get(new URI("cachedfile://a"), conf); } @@ -245,12 +252,14 @@ public void testCloseAllForUGI() throws Exception { conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName()); UserGroupInformation ugiA = UserGroupInformation.createRemoteUser("foo"); FileSystem fsA = ugiA.doAs(new PrivilegedExceptionAction() { + @Override public FileSystem run() throws Exception { return FileSystem.get(new URI("cachedfile://a"), conf); } }); //Now we should get the cached filesystem FileSystem fsA1 = ugiA.doAs(new PrivilegedExceptionAction() { + @Override public FileSystem run() throws Exception { return FileSystem.get(new URI("cachedfile://a"), conf); } @@ -261,6 +270,7 @@ public FileSystem run() throws Exception { //Now we should get a different (newly created) filesystem fsA1 = ugiA.doAs(new PrivilegedExceptionAction() { + @Override public FileSystem run() throws Exception { return FileSystem.get(new URI("cachedfile://a"), conf); } diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsOptions.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsOptions.java index c66b4fa901..574ed704da 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsOptions.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsOptions.java @@ -19,8 +19,6 @@ import static org.junit.Assert.*; -import java.io.IOException; - import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.util.DataChecksum; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java index dcb5871761..c6812a1930 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java @@ -411,6 +411,7 @@ public FileStatus getFileStatus(Path p) throws IOException { } static class MyFsShell extends FsShell { + @Override protected void registerCommands(CommandFactory factory) { factory.addClass(InterruptCommand.class, "-testInterrupt"); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java index 6f3c270232..aae013fd77 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java @@ -18,7 +18,6 @@ package org.apache.hadoop.fs; import java.io.IOException; -import java.util.Iterator; import java.util.HashSet; import java.util.Random; import java.util.Set; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextCreateMkdir.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextCreateMkdir.java index e3402abee9..f5decbb2b0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextCreateMkdir.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextCreateMkdir.java @@ -23,6 +23,7 @@ public class TestLocalFSFileContextCreateMkdir extends FileContextCreateMkdirBaseTest { + @Override @Before public void setUp() throws Exception { fc = FileContext.getLocalFSFileContext(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextMainOperations.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextMainOperations.java index 901b6c96ea..d1c272cc85 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextMainOperations.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextMainOperations.java @@ -27,6 +27,7 @@ public class TestLocalFSFileContextMainOperations extends FileContextMainOperationsBaseTest { + @Override @Before public void setUp() throws Exception { fc = FileContext.getLocalFSFileContext(); @@ -34,6 +35,7 @@ public void setUp() throws Exception { } static Path wd = null; + @Override protected Path getDefaultWorkingDirectory() throws IOException { if (wd == null) wd = FileSystem.getLocal(new 
Configuration()).getWorkingDirectory(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextSymlink.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextSymlink.java index 89684fe720..64d0525a18 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextSymlink.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextSymlink.java @@ -35,18 +35,22 @@ */ public class TestLocalFSFileContextSymlink extends FileContextSymlinkBaseTest { + @Override protected String getScheme() { return "file"; } + @Override protected String testBaseDir1() throws IOException { return getAbsoluteTestRootDir(fc)+"/test1"; } + @Override protected String testBaseDir2() throws IOException { return getAbsoluteTestRootDir(fc)+"/test2"; } + @Override protected URI testURI() { try { return new URI("file:///"); @@ -55,6 +59,7 @@ protected URI testURI() { } } + @Override @Before public void setUp() throws Exception { fc = FileContext.getLocalFSFileContext(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFsFCStatistics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFsFCStatistics.java index 35c23cb0f3..45e9bfb79c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFsFCStatistics.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFsFCStatistics.java @@ -47,15 +47,18 @@ public void tearDown() throws Exception { fc.delete(getTestRootPath(fc, "test"), true); } + @Override protected void verifyReadBytes(Statistics stats) { Assert.assertEquals(blockSize, stats.getBytesRead()); } + @Override protected void verifyWrittenBytes(Statistics stats) { //Extra 12 bytes are written apart from the block. 
Assert.assertEquals(blockSize + 12, stats.getBytesWritten()); } + @Override protected URI getFsUri() { return URI.create(LOCAL_FS_ROOT_URI); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocal_S3FileContextURI.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocal_S3FileContextURI.java index 512567a8d5..6c417cdb7c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocal_S3FileContextURI.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocal_S3FileContextURI.java @@ -24,6 +24,7 @@ public class TestLocal_S3FileContextURI extends FileContextURIBase { + @Override @Before public void setUp() throws Exception { Configuration S3Conf = new Configuration(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestS3_LocalFileContextURI.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestS3_LocalFileContextURI.java index c6324f8dc9..22fa5b0629 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestS3_LocalFileContextURI.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestS3_LocalFileContextURI.java @@ -24,6 +24,7 @@ public class TestS3_LocalFileContextURI extends FileContextURIBase { + @Override @Before public void setUp() throws Exception { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java index 8bfa7185b0..70bd62fa00 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java @@ -67,6 +67,7 @@ protected static int countSameDeletedFiles(FileSystem fs, // filter that matches all the files that start with fileName* PathFilter pf = new PathFilter() { + @Override public boolean accept(Path file) { return file.getName().startsWith(prefix); } @@ -563,6 +564,7 @@ static class TestLFS extends LocalFileSystem { super(); this.home = home; } + @Override public Path getHomeDirectory() { return home; } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/kfs/KFSEmulationImpl.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/kfs/KFSEmulationImpl.java index b8b6957266..baf25ded69 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/kfs/KFSEmulationImpl.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/kfs/KFSEmulationImpl.java @@ -39,16 +39,20 @@ public KFSEmulationImpl(Configuration conf) throws IOException { localFS = FileSystem.getLocal(conf); } + @Override public boolean exists(String path) throws IOException { return localFS.exists(new Path(path)); } + @Override public boolean isDirectory(String path) throws IOException { return localFS.isDirectory(new Path(path)); } + @Override public boolean isFile(String path) throws IOException { return localFS.isFile(new Path(path)); } + @Override public String[] readdir(String path) throws IOException { FileStatus[] p = localFS.listStatus(new Path(path)); try { @@ -64,10 +68,12 @@ public String[] readdir(String path) throws IOException { return entries; } + @Override public FileStatus[] readdirplus(Path path) throws IOException { return localFS.listStatus(path); } + @Override public int mkdirs(String path) throws 
IOException { if (localFS.mkdirs(new Path(path))) return 0; @@ -75,12 +81,14 @@ public int mkdirs(String path) throws IOException { return -1; } + @Override public int rename(String source, String dest) throws IOException { if (localFS.rename(new Path(source), new Path(dest))) return 0; return -1; } + @Override public int rmdir(String path) throws IOException { if (isDirectory(path)) { // the directory better be empty @@ -91,21 +99,26 @@ public int rmdir(String path) throws IOException { return -1; } + @Override public int remove(String path) throws IOException { if (isFile(path) && (localFS.delete(new Path(path), true))) return 0; return -1; } + @Override public long filesize(String path) throws IOException { return localFS.getFileStatus(new Path(path)).getLen(); } + @Override public short getReplication(String path) throws IOException { return 1; } + @Override public short setReplication(String path, short replication) throws IOException { return 1; } + @Override public String[][] getDataLocation(String path, long start, long len) throws IOException { BlockLocation[] blkLocations = localFS.getFileBlockLocations(localFS.getFileStatus(new Path(path)), @@ -123,6 +136,7 @@ public String[][] getDataLocation(String path, long start, long len) throws IOEx return hints; } + @Override public long getModificationTime(String path) throws IOException { FileStatus s = localFS.getFileStatus(new Path(path)); if (s == null) @@ -131,18 +145,21 @@ public long getModificationTime(String path) throws IOException { return s.getModificationTime(); } + @Override public FSDataOutputStream append(String path, int bufferSize, Progressable progress) throws IOException { // besides path/overwrite, the other args don't matter for // testing purposes. return localFS.append(new Path(path)); } + @Override public FSDataOutputStream create(String path, short replication, int bufferSize, Progressable progress) throws IOException { // besides path/overwrite, the other args don't matter for // testing purposes. 
return localFS.create(new Path(path)); } + @Override public FSDataInputStream open(String path, int bufferSize) throws IOException { return localFS.open(new Path(path)); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/kfs/TestKosmosFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/kfs/TestKosmosFileSystem.java index 3ff998f996..c1c676e9b0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/kfs/TestKosmosFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/kfs/TestKosmosFileSystem.java @@ -18,21 +18,17 @@ package org.apache.hadoop.fs.kfs; -import java.io.*; -import java.net.*; +import java.io.IOException; +import java.net.URI; import junit.framework.TestCase; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.kfs.KosmosFileSystem; - public class TestKosmosFileSystem extends TestCase { KosmosFileSystem kosmosFileSystem; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/DataGenerator.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/DataGenerator.java index 5124211d34..3222cf43bb 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/DataGenerator.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/DataGenerator.java @@ -67,6 +67,7 @@ public class DataGenerator extends Configured implements Tool { * namespace. Afterwards it reads the file attributes and creates files * in the file. All file content is filled with 'a'. */ + @Override public int run(String[] args) throws Exception { int exitCode = 0; exitCode = init(args); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java index ea192c4849..7490be80af 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java @@ -186,6 +186,7 @@ private DFSClientThread(int id) { /** Main loop * Each iteration decides what's the next operation and then pauses. */ + @Override public void run() { try { while (shouldRun) { @@ -281,6 +282,7 @@ private void list() throws IOException { * Before exiting, it prints the average execution for * each operation and operation throughput. 
*/ + @Override public int run(String[] args) throws Exception { int exitCode = init(args); if (exitCode != 0) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/StructureGenerator.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/StructureGenerator.java index 689e01dbf3..71649a5941 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/StructureGenerator.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/StructureGenerator.java @@ -214,6 +214,7 @@ private FileINode(String name, double numOfBlocks) { } /** Output a file attribute */ + @Override protected void outputFiles(PrintStream out, String prefix) { prefix = (prefix == null)?super.name: prefix + "/"+super.name; out.println(prefix + " " + numOfBlocks); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/InMemoryFileSystemStore.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/InMemoryFileSystemStore.java index 84d142e089..8024c6acc7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/InMemoryFileSystemStore.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/InMemoryFileSystemStore.java @@ -47,34 +47,42 @@ class InMemoryFileSystemStore implements FileSystemStore { private SortedMap inodes = new TreeMap(); private Map blocks = new HashMap(); + @Override public void initialize(URI uri, Configuration conf) { this.conf = conf; } + @Override public String getVersion() throws IOException { return "0"; } + @Override public void deleteINode(Path path) throws IOException { inodes.remove(normalize(path)); } + @Override public void deleteBlock(Block block) throws IOException { blocks.remove(block.getId()); } + @Override public boolean inodeExists(Path path) throws IOException { return inodes.containsKey(normalize(path)); } + @Override public boolean blockExists(long blockId) throws IOException { return blocks.containsKey(blockId); } + @Override public INode retrieveINode(Path path) throws IOException { return inodes.get(normalize(path)); } + @Override public File retrieveBlock(Block block, long byteRangeStart) throws IOException { byte[] data = blocks.get(block.getId()); File file = createTempFile(); @@ -100,6 +108,7 @@ private File createTempFile() throws IOException { return result; } + @Override public Set listSubPaths(Path path) throws IOException { Path normalizedPath = normalize(path); // This is inefficient but more than adequate for testing purposes. 
@@ -112,6 +121,7 @@ public Set listSubPaths(Path path) throws IOException { return subPaths; } + @Override public Set listDeepSubPaths(Path path) throws IOException { Path normalizedPath = normalize(path); String pathString = normalizedPath.toUri().getPath(); @@ -128,10 +138,12 @@ public Set listDeepSubPaths(Path path) throws IOException { return subPaths; } + @Override public void storeINode(Path path, INode inode) throws IOException { inodes.put(normalize(path), inode); } + @Override public void storeBlock(Block block, File file) throws IOException { ByteArrayOutputStream out = new ByteArrayOutputStream(); byte[] buf = new byte[8192]; @@ -157,11 +169,13 @@ private Path normalize(Path path) { return new Path(path.toUri().getPath()); } + @Override public void purge() throws IOException { inodes.clear(); blocks.clear(); } + @Override public void dump() throws IOException { StringBuilder sb = new StringBuilder(getClass().getSimpleName()); sb.append(", \n"); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/InMemoryNativeFileSystemStore.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/InMemoryNativeFileSystemStore.java index bc8ccc0f68..abac70ac1b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/InMemoryNativeFileSystemStore.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/InMemoryNativeFileSystemStore.java @@ -55,15 +55,18 @@ class InMemoryNativeFileSystemStore implements NativeFileSystemStore { new TreeMap(); private SortedMap dataMap = new TreeMap(); + @Override public void initialize(URI uri, Configuration conf) throws IOException { this.conf = conf; } + @Override public void storeEmptyFile(String key) throws IOException { metadataMap.put(key, new FileMetadata(key, 0, Time.now())); dataMap.put(key, new byte[0]); } + @Override public void storeFile(String key, File file, byte[] md5Hash) throws IOException { @@ -86,10 +89,12 @@ public void storeFile(String key, File file, byte[] md5Hash) dataMap.put(key, out.toByteArray()); } + @Override public InputStream retrieve(String key) throws IOException { return retrieve(key, 0); } + @Override public InputStream retrieve(String key, long byteRangeStart) throws IOException { @@ -118,15 +123,18 @@ private File createTempFile() throws IOException { return result; } + @Override public FileMetadata retrieveMetadata(String key) throws IOException { return metadataMap.get(key); } + @Override public PartialListing list(String prefix, int maxListingLength) throws IOException { return list(prefix, maxListingLength, null, false); } + @Override public PartialListing list(String prefix, int maxListingLength, String priorLastKey, boolean recursive) throws IOException { @@ -165,16 +173,19 @@ private PartialListing list(String prefix, String delimiter, commonPrefixes.toArray(new String[0])); } + @Override public void delete(String key) throws IOException { metadataMap.remove(key); dataMap.remove(key); } + @Override public void copy(String srcKey, String dstKey) throws IOException { metadataMap.put(dstKey, metadataMap.get(srcKey)); dataMap.put(dstKey, dataMap.get(srcKey)); } + @Override public void purge(String prefix) throws IOException { Iterator> i = metadataMap.entrySet().iterator(); @@ -187,6 +198,7 @@ public void purge(String prefix) throws IOException { } } + @Override public void dump() throws IOException { System.out.println(metadataMap.values()); System.out.println(dataMap.keySet()); diff 
--git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java index 44d7a4a7c1..e990b92465 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java @@ -347,6 +347,7 @@ static class MockFileSystem extends FilterFileSystem { MockFileSystem() { super(mock(FileSystem.class)); } + @Override public void initialize(URI name, Configuration conf) throws IOException {} } } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java index 2f8d8ce848..de4b1e87ac 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java @@ -33,6 +33,7 @@ public class TestFSMainOperationsLocalFileSystem extends FSMainOperationsBaseTest { static FileSystem fcTarget; + @Override @Before public void setUp() throws Exception { Configuration conf = new Configuration(); @@ -42,6 +43,7 @@ public void setUp() throws Exception { super.setUp(); } + @Override @After public void tearDown() throws Exception { super.tearDown(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcCreateMkdirLocalFs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcCreateMkdirLocalFs.java index 39e3515d03..16b38b72ec 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcCreateMkdirLocalFs.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcCreateMkdirLocalFs.java @@ -28,12 +28,14 @@ public class TestFcCreateMkdirLocalFs extends FileContextCreateMkdirBaseTest { + @Override @Before public void setUp() throws Exception { fc = ViewFsTestSetup.setupForViewFsLocalFs(); super.setUp(); } + @Override @After public void tearDown() throws Exception { super.tearDown(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcMainOperationsLocalFs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcMainOperationsLocalFs.java index 235a182616..5641c9d70b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcMainOperationsLocalFs.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcMainOperationsLocalFs.java @@ -36,6 +36,7 @@ public class TestFcMainOperationsLocalFs extends FileContext fclocal; Path targetOfTests; + @Override @Before public void setUp() throws Exception { /** @@ -79,6 +80,7 @@ public void setUp() throws Exception { super.setUp(); } + @Override @After public void tearDown() throws Exception { super.tearDown(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcPermissionsLocalFs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcPermissionsLocalFs.java index 3e92eb9cc1..0e44be9be8 100644 --- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcPermissionsLocalFs.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcPermissionsLocalFs.java @@ -27,12 +27,14 @@ public class TestFcPermissionsLocalFs extends FileContextPermissionBase { + @Override @Before public void setUp() throws Exception { fc = ViewFsTestSetup.setupForViewFsLocalFs(); super.setUp(); } + @Override @After public void tearDown() throws Exception { super.tearDown(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegationTokenSupport.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegationTokenSupport.java index e3f6e404a1..735dfcf3cf 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegationTokenSupport.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegationTokenSupport.java @@ -160,6 +160,7 @@ Token[] addTokensWithCreds(FileSystem fs, Credentials creds) throws Exception static class FakeFileSystem extends RawLocalFileSystem { URI uri; + @Override public void initialize(URI name, Configuration conf) throws IOException { this.uri = name; } @@ -169,6 +170,7 @@ public Path getInitialWorkingDirectory() { return new Path("/"); // ctor calls getUri before the uri is inited... } + @Override public URI getUri() { return uri; } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java index 8d4c38e1e6..4b45fc8c5b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java @@ -39,6 +39,7 @@ public class TestViewFileSystemLocalFileSystem extends ViewFileSystemBaseTest { + @Override @Before public void setUp() throws Exception { // create the test root on local_fs @@ -47,6 +48,7 @@ public void setUp() throws Exception { } + @Override @After public void tearDown() throws Exception { fsTarget.delete(FileSystemTestHelper.getTestRootPath(fsTarget), true); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAuthorityLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAuthorityLocalFileSystem.java index 3ba3e002e0..4786cd5fdf 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAuthorityLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAuthorityLocalFileSystem.java @@ -42,6 +42,7 @@ public class TestViewFileSystemWithAuthorityLocalFileSystem extends ViewFileSystemBaseTest { URI schemeWithAuthority; + @Override @Before public void setUp() throws Exception { // create the test root on local_fs @@ -55,12 +56,14 @@ public void setUp() throws Exception { fsView = FileSystem.get(schemeWithAuthority, conf); } + @Override @After public void tearDown() throws Exception { fsTarget.delete(FileSystemTestHelper.getTestRootPath(fsTarget), true); super.tearDown(); } + @Override @Test public void 
testBasicPaths() { Assert.assertEquals(schemeWithAuthority, diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLocalFs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLocalFs.java index 2a4488ce76..99bcf5d32b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLocalFs.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLocalFs.java @@ -26,6 +26,7 @@ public class TestViewFsLocalFs extends ViewFsBaseTest { + @Override @Before public void setUp() throws Exception { // create the test root on local_fs @@ -34,6 +35,7 @@ public void setUp() throws Exception { } + @Override @After public void tearDown() throws Exception { super.tearDown(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java index 81270c2320..4325f40346 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java @@ -30,7 +30,6 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; -import org.mortbay.log.Log; public class TestViewFsTrash { FileSystem fsTarget; // the target file system - the mount will point here @@ -46,6 +45,7 @@ static class TestLFS extends LocalFileSystem { super(); this.home = home; } + @Override public Path getHomeDirectory() { return home; } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAuthorityLocalFs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAuthorityLocalFs.java index 217d3fcd94..2e498f2c0a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAuthorityLocalFs.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAuthorityLocalFs.java @@ -41,6 +41,7 @@ public class TestViewFsWithAuthorityLocalFs extends ViewFsBaseTest { URI schemeWithAuthority; + @Override @Before public void setUp() throws Exception { // create the test root on local_fs @@ -54,11 +55,13 @@ public void setUp() throws Exception { fcView = FileContext.getFileContext(schemeWithAuthority, conf); } + @Override @After public void tearDown() throws Exception { super.tearDown(); } + @Override @Test public void testBasicPaths() { Assert.assertEquals(schemeWithAuthority, diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java index 4a576d08eb..9eec749336 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java @@ -23,7 +23,6 @@ import java.net.URISyntaxException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java index 80612d9b78..9c68b282f6 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java @@ -25,7 +25,6 @@ import java.io.OutputStream; import java.net.Socket; import java.util.ArrayList; -import java.util.Arrays; import java.util.LinkedList; import java.util.List; import java.util.concurrent.CountDownLatch; @@ -82,6 +81,7 @@ public ClientBaseWithFixes() { * */ protected class NullWatcher implements Watcher { + @Override public void process(WatchedEvent event) { /* nada */ } } @@ -97,6 +97,7 @@ synchronized public void reset() { clientConnected = new CountDownLatch(1); connected = false; } + @Override synchronized public void process(WatchedEvent event) { if (event.getState() == KeeperState.SyncConnected || event.getState() == KeeperState.ConnectedReadOnly) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java index c38bc53424..0985af18c6 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java @@ -184,6 +184,7 @@ public void close() throws IOException { } public static class DummyFencer implements FenceMethod { + @Override public void checkArgs(String args) throws BadFencingConfigurationException { } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java index d9b10ae091..eef6d7de41 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java @@ -46,14 +46,17 @@ public class TestGlobalFilter extends HttpServerFunctionalTest { static public class RecordingFilter implements Filter { private FilterConfig filterConfig = null; + @Override public void init(FilterConfig filterConfig) { this.filterConfig = filterConfig; } + @Override public void destroy() { this.filterConfig = null; } + @Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException { if (filterConfig == null) @@ -69,6 +72,7 @@ public void doFilter(ServletRequest request, ServletResponse response, static public class Initializer extends FilterInitializer { public Initializer() {} + @Override public void initFilter(FilterContainer container, Configuration conf) { container.addGlobalFilter("recording", RecordingFilter.class.getName(), null); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java index 73aebea486..3bd77f039c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java @@ -46,14 +46,17 @@ public class TestPathFilter extends HttpServerFunctionalTest { static public class RecordingFilter implements Filter { private FilterConfig filterConfig 
= null; + @Override public void init(FilterConfig filterConfig) { this.filterConfig = filterConfig; } + @Override public void destroy() { this.filterConfig = null; } + @Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException { if (filterConfig == null) @@ -69,6 +72,7 @@ public void doFilter(ServletRequest request, ServletResponse response, static public class Initializer extends FilterInitializer { public Initializer() {} + @Override public void initFilter(FilterContainer container, Configuration conf) { container.addFilter("recording", RecordingFilter.class.getName(), null); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java index 7bf608767e..a4d32531ce 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java @@ -45,14 +45,17 @@ public class TestServletFilter extends HttpServerFunctionalTest { static public class SimpleFilter implements Filter { private FilterConfig filterConfig = null; + @Override public void init(FilterConfig filterConfig) throws ServletException { this.filterConfig = filterConfig; } + @Override public void destroy() { this.filterConfig = null; } + @Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException { if (filterConfig == null) @@ -67,6 +70,7 @@ public void doFilter(ServletRequest request, ServletResponse response, static public class Initializer extends FilterInitializer { public Initializer() {} + @Override public void initFilter(FilterContainer container, Configuration conf) { container.addFilter("simple", SimpleFilter.class.getName(), null); } @@ -149,6 +153,7 @@ static public class Initializer extends FilterInitializer { public Initializer() { } + @Override public void initFilter(FilterContainer container, Configuration conf) { container.addFilter("simple", ErrorFilter.class.getName(), null); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/AvroTestUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/AvroTestUtil.java index e3e885ad12..74e9cc86bd 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/AvroTestUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/AvroTestUtil.java @@ -18,12 +18,10 @@ package org.apache.hadoop.io; -import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.lang.reflect.Type; import org.apache.avro.Schema; -import org.apache.avro.io.BinaryEncoder; import org.apache.avro.io.EncoderFactory; import org.apache.avro.reflect.ReflectData; import org.apache.avro.reflect.ReflectDatumWriter; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/RandomDatum.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/RandomDatum.java index e97f2068be..8f99aab482 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/RandomDatum.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/RandomDatum.java @@ -40,11 +40,13 @@ public int getLength() { return length; } + @Override public void write(DataOutput out) throws IOException { 
out.writeInt(length); out.write(data); } + @Override public void readFields(DataInput in) throws IOException { length = in.readInt(); if (data == null || length > data.length) @@ -102,6 +104,7 @@ public Comparator() { super(RandomDatum.class); } + @Override public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { int n1 = readInt(b1, s1); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestEnumSetWritable.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestEnumSetWritable.java index 2ca6c87f8e..077c0b065d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestEnumSetWritable.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestEnumSetWritable.java @@ -19,17 +19,9 @@ package org.apache.hadoop.io; import java.io.IOException; -import java.io.ByteArrayOutputStream; import java.util.EnumSet; import java.lang.reflect.Type; -import org.apache.avro.Schema; -import org.apache.avro.reflect.ReflectData; -import org.apache.avro.reflect.ReflectDatumWriter; -import org.apache.avro.reflect.ReflectDatumReader; -import org.apache.avro.io.BinaryEncoder; -import org.apache.avro.io.DecoderFactory; - import junit.framework.TestCase; /** Unit test for EnumSetWritable */ diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestGenericWritable.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestGenericWritable.java index 486d93d438..880bba0e8b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestGenericWritable.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestGenericWritable.java @@ -48,9 +48,11 @@ protected void setUp() throws Exception { /** Dummy class for testing {@link GenericWritable} */ public static class Foo implements Writable { private String foo = "foo"; + @Override public void readFields(DataInput in) throws IOException { foo = Text.readString(in); } + @Override public void write(DataOutput out) throws IOException { Text.writeString(out, foo); } @@ -65,15 +67,19 @@ public boolean equals(Object obj) { public static class Bar implements Writable, Configurable { private int bar = 42; //The Answer to The Ultimate Question Of Life, the Universe and Everything private Configuration conf = null; + @Override public void readFields(DataInput in) throws IOException { bar = in.readInt(); } + @Override public void write(DataOutput out) throws IOException { out.writeInt(bar); } + @Override public Configuration getConf() { return conf; } + @Override public void setConf(Configuration conf) { this.conf = conf; } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMD5Hash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMD5Hash.java index 86fefcf561..509d75e807 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMD5Hash.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMD5Hash.java @@ -91,6 +91,7 @@ public void testMD5Hash() throws Exception { closeHash1.hashCode() != closeHash2.hashCode()); Thread t1 = new Thread() { + @Override public void run() { for (int i = 0; i < 100; i++) { MD5Hash hash = new MD5Hash(DFF); @@ -100,6 +101,7 @@ public void run() { }; Thread t2 = new Thread() { + @Override public void run() { for (int i = 0; i < 100; i++) { MD5Hash hash = new MD5Hash(D00); diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSecureIOUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSecureIOUtils.java index bae0ccd836..a48fb6770b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSecureIOUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSecureIOUtils.java @@ -21,17 +21,14 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.io.nativeio.NativeIO; import org.junit.BeforeClass; -import org.junit.Before; import org.junit.Test; import static org.junit.Assume.*; import static org.junit.Assert.*; import java.io.IOException; import java.io.File; -import java.io.FileInputStream; import java.io.FileOutputStream; public class TestSecureIOUtils { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java index fe33fefd91..1517c062b7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java @@ -481,6 +481,7 @@ private TestFSDataInputStream(InputStream in) throws IOException { super(in); } + @Override public void close() throws IOException { closed = true; super.close(); @@ -505,6 +506,7 @@ public void testCloseForErroneousSequenceFile() try { new SequenceFile.Reader(fs, path, conf) { // this method is called by the SequenceFile.Reader constructor, overwritten, so we can access the opened file + @Override protected FSDataInputStream openFile(FileSystem fs, Path file, int bufferSize, long length) throws IOException { final InputStream in = super.openFile(fs, file, bufferSize, length); openedFile[0] = new TestFSDataInputStream(in); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java index 21da8c0dce..df9fb54032 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java @@ -284,6 +284,7 @@ public ConcurrentEncodeDecodeThread(String name) { super(name); } + @Override public void run() { String name = this.getName(); DataOutputBuffer out = new DataOutputBuffer(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestVersionedWritable.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestVersionedWritable.java index df48f3cace..f7d45b9da7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestVersionedWritable.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestVersionedWritable.java @@ -37,16 +37,19 @@ public static class SimpleVersionedWritable extends VersionedWritable { private static byte VERSION = 1; + @Override public byte getVersion() { return VERSION; } + @Override public void write(DataOutput out) throws IOException { super.write(out); // version. 
out.writeInt(state); } + @Override public void readFields(DataInput in) throws IOException { super.readFields(in); // version this.state = in.readInt(); @@ -61,6 +64,7 @@ public static SimpleVersionedWritable read(DataInput in) throws IOException { /** Required by test code, below. */ + @Override public boolean equals(Object o) { if (!(o instanceof SimpleVersionedWritable)) return false; @@ -85,6 +89,7 @@ public static class AdvancedVersionedWritable extends SimpleVersionedWritable { SimpleVersionedWritable containedObject = new SimpleVersionedWritable(); String[] testStringArray = {"The", "Quick", "Brown", "Fox", "Jumped", "Over", "The", "Lazy", "Dog"}; + @Override public void write(DataOutput out) throws IOException { super.write(out); out.writeUTF(shortTestString); @@ -97,6 +102,7 @@ public void write(DataOutput out) throws IOException { } + @Override public void readFields(DataInput in) throws IOException { super.readFields(in); shortTestString = in.readUTF(); @@ -108,6 +114,7 @@ public void readFields(DataInput in) throws IOException { + @Override public boolean equals(Object o) { super.equals(o); @@ -134,6 +141,7 @@ public boolean equals(Object o) { /* This one checks that version mismatch is thrown... */ public static class SimpleVersionedWritableV2 extends SimpleVersionedWritable { static byte VERSION = 2; + @Override public byte getVersion() { return VERSION; } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritable.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritable.java index 31c237f872..971e237d50 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritable.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritable.java @@ -38,10 +38,12 @@ public static class SimpleWritable implements Writable { int state = RANDOM.nextInt(); + @Override public void write(DataOutput out) throws IOException { out.writeInt(state); } + @Override public void readFields(DataInput in) throws IOException { this.state = in.readInt(); } @@ -53,6 +55,7 @@ public static SimpleWritable read(DataInput in) throws IOException { } /** Required by test code, below. */ + @Override public boolean equals(Object o) { if (!(o instanceof SimpleWritable)) return false; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritableName.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritableName.java index 7cb069ab00..396079c394 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritableName.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritableName.java @@ -39,10 +39,12 @@ public static class SimpleWritable implements Writable { int state = RANDOM.nextInt(); + @Override public void write(DataOutput out) throws IOException { out.writeInt(state); } + @Override public void readFields(DataInput in) throws IOException { this.state = in.readInt(); } @@ -54,6 +56,7 @@ public static SimpleWritable read(DataInput in) throws IOException { } /** Required by test code, below. 
*/ + @Override public boolean equals(Object o) { if (!(o instanceof SimpleWritable)) return false; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodecFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodecFactory.java index 2caef859e6..280f1a8785 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodecFactory.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodecFactory.java @@ -40,68 +40,81 @@ public Configuration getConf() { return conf; } + @Override public CompressionOutputStream createOutputStream(OutputStream out) throws IOException { return null; } + @Override public Class getCompressorType() { return null; } + @Override public Compressor createCompressor() { return null; } + @Override public CompressionInputStream createInputStream(InputStream in, Decompressor decompressor) throws IOException { return null; } + @Override public CompressionInputStream createInputStream(InputStream in) throws IOException { return null; } + @Override public CompressionOutputStream createOutputStream(OutputStream out, Compressor compressor) throws IOException { return null; } + @Override public Class getDecompressorType() { return null; } + @Override public Decompressor createDecompressor() { return null; } + @Override public String getDefaultExtension() { return ".base"; } } private static class BarCodec extends BaseCodec { + @Override public String getDefaultExtension() { return "bar"; } } private static class FooBarCodec extends BaseCodec { + @Override public String getDefaultExtension() { return ".foo.bar"; } } private static class FooCodec extends BaseCodec { + @Override public String getDefaultExtension() { return ".foo"; } } private static class NewGzipCodec extends BaseCodec { + @Override public String getDefaultExtension() { return ".gz"; } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/NanoTimer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/NanoTimer.java index 1584895407..c25c4dc427 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/NanoTimer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/NanoTimer.java @@ -93,6 +93,7 @@ public boolean isStarted() { * * Note: If timer is never started, "ERR" will be returned. 
*/ + @Override public String toString() { if (!readable()) { return "ERR"; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileByteArrays.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileByteArrays.java index 6242ea6b37..2682634516 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileByteArrays.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileByteArrays.java @@ -35,7 +35,6 @@ import org.apache.hadoop.io.file.tfile.TFile.Writer; import org.apache.hadoop.io.file.tfile.TFile.Reader.Location; import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner; -import org.apache.hadoop.util.NativeCodeLoader; import org.junit.After; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileNoneCodecsJClassComparatorByteArrays.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileNoneCodecsJClassComparatorByteArrays.java index bd56d44965..7a2c2fc9c4 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileNoneCodecsJClassComparatorByteArrays.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileNoneCodecsJClassComparatorByteArrays.java @@ -19,9 +19,6 @@ import java.io.IOException; -import org.apache.hadoop.io.RawComparator; -import org.apache.hadoop.io.WritableComparator; - /** * * Byte arrays test case class using GZ compression codec, base class of none diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java index 4e507259c0..3502198898 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java @@ -148,12 +148,14 @@ public TFileAppendable(FileSystem fs, Path path, String compress, this.writer = new TFile.Writer(fsdos, minBlkSize, compress, null, conf); } + @Override public void append(BytesWritable key, BytesWritable value) throws IOException { writer.append(key.get(), 0, key.getSize(), value.get(), 0, value .getSize()); } + @Override public void close() throws IOException { writer.close(); fsdos.close(); @@ -196,22 +198,27 @@ private void checkValueBuffer(int size) { - valueBuffer.length)]; } + @Override public byte[] getKey() { return keyBuffer; } + @Override public int getKeyLength() { return keyLength; } + @Override public byte[] getValue() { return valueBuffer; } + @Override public int getValueLength() { return valueLength; } + @Override public boolean next() throws IOException { if (scanner.atEnd()) return false; Entry entry = scanner.entry(); @@ -225,6 +232,7 @@ public boolean next() throws IOException { return true; } + @Override public void close() throws IOException { scanner.close(); reader.close(); @@ -266,11 +274,13 @@ else if (!"none".equals(compress)) } } + @Override public void append(BytesWritable key, BytesWritable value) throws IOException { writer.append(key, value); } + @Override public void close() throws IOException { writer.close(); fsdos.close(); @@ -291,26 +301,32 @@ public 
SeqFileReadable(FileSystem fs, Path path, int osBufferSize) value = new BytesWritable(); } + @Override public byte[] getKey() { return key.get(); } + @Override public int getKeyLength() { return key.getSize(); } + @Override public byte[] getValue() { return value.get(); } + @Override public int getValueLength() { return value.getSize(); } + @Override public boolean next() throws IOException { return reader.next(key, value); } + @Override public void close() throws IOException { reader.close(); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java index b9d4ec5690..acd728b0ec 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java @@ -88,6 +88,7 @@ public void testMultiThreadedFstat() throws Exception { List statters = new ArrayList(); for (int i = 0; i < 10; i++) { Thread statter = new Thread() { + @Override public void run() { long et = Time.now() + 5000; while (Time.now() < et) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestFailoverProxy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestFailoverProxy.java index 4949ef3140..77c9e30eed 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestFailoverProxy.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestFailoverProxy.java @@ -252,6 +252,7 @@ public ConcurrentMethodThread(UnreliableInterface unreliable) { this.unreliable = unreliable; } + @Override public void run() { try { result = unreliable.failsIfIdentifierDoesntMatch("impl2"); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java index 54fe677844..5b77698b10 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java @@ -19,7 +19,6 @@ import java.io.IOException; -import org.apache.hadoop.io.retry.UnreliableInterface.UnreliableException; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.StandbyException; @@ -60,24 +59,29 @@ public UnreliableImplementation(String identifier, this.exceptionToFailWith = exceptionToFailWith; } + @Override public void alwaysSucceeds() { // do nothing } + @Override public void alwaysFailsWithFatalException() throws FatalException { throw new FatalException(); } + @Override public void alwaysFailsWithRemoteFatalException() throws RemoteException { throw new RemoteException(FatalException.class.getName(), "Oops"); } + @Override public void failsOnceThenSucceeds() throws UnreliableException { if (failsOnceInvocationCount++ == 0) { throw new UnreliableException(); } } + @Override public boolean failsOnceThenSucceedsWithReturnValue() throws UnreliableException { if (failsOnceWithValueInvocationCount++ == 0) { throw new UnreliableException(); @@ -85,6 +89,7 @@ public boolean failsOnceThenSucceedsWithReturnValue() throws UnreliableException return true; } + @Override public void failsTenTimesThenSucceeds() throws 
UnreliableException { if (failsTenTimesInvocationCount++ < 10) { throw new UnreliableException(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/avro/Record.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/avro/Record.java index 275a0dc1e2..4548c869f9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/avro/Record.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/avro/Record.java @@ -21,10 +21,12 @@ public class Record { public int x = 7; + @Override public int hashCode() { return x; } + @Override public boolean equals(Object obj) { if (this == obj) return true; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/avro/TestAvroSerialization.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/avro/TestAvroSerialization.java index 181419c137..1926ec55e5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/avro/TestAvroSerialization.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/avro/TestAvroSerialization.java @@ -70,10 +70,12 @@ public void testReflect() throws Exception { public static class InnerRecord { public int x = 7; + @Override public int hashCode() { return x; } + @Override public boolean equals(Object obj) { if (this == obj) return true; @@ -91,10 +93,12 @@ public boolean equals(Object obj) { public static class RefSerializable implements AvroReflectSerializable { public int x = 7; + @Override public int hashCode() { return x; } + @Override public boolean equals(Object obj) { if (this == obj) return true; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java index ace6173faa..a82419d5dd 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java @@ -208,6 +208,7 @@ void connectToServerAndGetDelegationToken( try { client = proxyUserUgi.doAs(new PrivilegedExceptionAction() { + @Override public MiniProtocol run() throws IOException { MiniProtocol p = (MiniProtocol) RPC.getProxy(MiniProtocol.class, MiniProtocol.versionID, addr, conf); @@ -235,6 +236,7 @@ long connectToServerUsingDelegationToken( long start = Time.now(); try { client = currentUgi.doAs(new PrivilegedExceptionAction() { + @Override public MiniProtocol run() throws IOException { return (MiniProtocol) RPC.getProxy(MiniProtocol.class, MiniProtocol.versionID, addr, conf); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java index c7bc6411de..a0d6de0e9a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java @@ -130,6 +130,7 @@ public SerialCaller(Client client, InetSocketAddress server, int count) { this.count = count; } + @Override public void run() { for (int i = 0; i < count; i++) { try { @@ -219,6 +220,7 @@ static void maybeThrowRTE() { private static class IOEOnReadWritable extends LongWritable { public IOEOnReadWritable() {} + 
@Override public void readFields(DataInput in) throws IOException { super.readFields(in); maybeThrowIOE(); @@ -229,6 +231,7 @@ public void readFields(DataInput in) throws IOException { private static class RTEOnReadWritable extends LongWritable { public RTEOnReadWritable() {} + @Override public void readFields(DataInput in) throws IOException { super.readFields(in); maybeThrowRTE(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java index e2e32c75ba..bf9fbc26d8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java @@ -106,17 +106,21 @@ DescriptorProtos.EnumDescriptorProto exchangeProto( public static class TestImpl implements TestProtocol { int fastPingCounter = 0; + @Override public long getProtocolVersion(String protocol, long clientVersion) { return TestProtocol.versionID; } + @Override public ProtocolSignature getProtocolSignature(String protocol, long clientVersion, int hashcode) { return new ProtocolSignature(TestProtocol.versionID, null); } + @Override public void ping() {} + @Override public synchronized void slowPing(boolean shouldSlow) { if (shouldSlow) { while (fastPingCounter < 2) { @@ -131,17 +135,22 @@ public synchronized void slowPing(boolean shouldSlow) { } } + @Override public String echo(String value) throws IOException { return value; } + @Override public String[] echo(String[] values) throws IOException { return values; } + @Override public Writable echo(Writable writable) { return writable; } + @Override public int add(int v1, int v2) { return v1 + v2; } + @Override public int add(int[] values) { int sum = 0; for (int i = 0; i < values.length; i++) { @@ -150,16 +159,19 @@ public int add(int[] values) { return sum; } + @Override public int error() throws IOException { throw new IOException("bobo"); } + @Override public void testServerGet() throws IOException { if (!(Server.get() instanceof RPC.Server)) { throw new IOException("Server.get() failed"); } } + @Override public int[] exchange(int[] values) { for (int i = 0; i < values.length; i++) { values[i] = i; @@ -186,6 +198,7 @@ static class Transactions implements Runnable { } // do two RPC that transfers data. 
+ @Override public void run() { int[] indata = new int[datasize]; int[] outdata = null; @@ -220,6 +233,7 @@ boolean isDone() { return done; } + @Override public void run() { try { proxy.slowPing(true); // this would hang until two fast pings happened diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java index 50ae210ea9..e2b7707cd9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java @@ -284,6 +284,7 @@ public void testHashCode() throws Exception { "org.apache.hadoop.ipc.TestRPCCompatibility$TestProtocol1") public interface TestProtocol4 extends TestProtocol2 { public static final long versionID = 4L; + @Override int echo(int value) throws IOException; } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java index 9246fd5d72..014875440e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java @@ -136,15 +136,18 @@ public void write(DataOutput out) throws IOException { public static class TestTokenSecretManager extends SecretManager { + @Override public byte[] createPassword(TestTokenIdentifier id) { return id.getBytes(); } + @Override public byte[] retrievePassword(TestTokenIdentifier id) throws InvalidToken { return id.getBytes(); } + @Override public TestTokenIdentifier createIdentifier() { return new TestTokenIdentifier(); } @@ -152,6 +155,7 @@ public TestTokenIdentifier createIdentifier() { public static class BadTokenSecretManager extends TestTokenSecretManager { + @Override public byte[] retrievePassword(TestTokenIdentifier id) throws InvalidToken { throw new InvalidToken(ERROR_MESSAGE); @@ -186,6 +190,7 @@ public interface TestSaslProtocol extends TestRPC.TestProtocol { public static class TestSaslImpl extends TestRPC.TestImpl implements TestSaslProtocol { + @Override public AuthenticationMethod getAuthMethod() throws IOException { return UserGroupInformation.getCurrentUser().getAuthenticationMethod(); } @@ -450,6 +455,7 @@ public void testDigestAuthMethod() throws Exception { current.addToken(token); current.doAs(new PrivilegedExceptionAction() { + @Override public Object run() throws IOException { TestSaslProtocol proxy = null; try { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics/TestMetricsServlet.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics/TestMetricsServlet.java index 8d5cfc9a55..ec54f59686 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics/TestMetricsServlet.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics/TestMetricsServlet.java @@ -43,6 +43,7 @@ public class TestMetricsServlet extends TestCase { * Initializes, for testing, two NoEmitMetricsContext's, and adds one value * to the first of them. 
*/ + @Override public void setUp() throws IOException { nc1 = new NoEmitMetricsContext(); nc1.init("test1", ContextFactory.getFactory()); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMetricsAnnotations.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMetricsAnnotations.java index 685fedc22c..5b75e33e31 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMetricsAnnotations.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMetricsAnnotations.java @@ -21,8 +21,6 @@ import org.junit.Test; import static org.junit.Assert.*; import static org.mockito.Mockito.*; -import static org.apache.hadoop.test.MockitoMaker.*; - import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsException; import org.apache.hadoop.metrics2.MetricsRecordBuilder; @@ -145,6 +143,7 @@ static class HybridMetrics implements MetricsSource { @Metric int getG0() { return 0; } + @Override public void getMetrics(MetricsCollector collector, boolean all) { collector.addRecord("foo") .setContext("foocontext") @@ -183,6 +182,7 @@ static class BadHybridMetrics implements MetricsSource { @Metric MutableCounterInt c1; + @Override public void getMetrics(MetricsCollector collector, boolean all) { collector.addRecord("foo"); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMetricsRegistry.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMetricsRegistry.java index 1969ccee54..47b496fa57 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMetricsRegistry.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMetricsRegistry.java @@ -51,6 +51,7 @@ public class TestMetricsRegistry { assertTrue("s1 found", r.get("s1") instanceof MutableStat); expectMetricsException("Metric name c1 already exists", new Runnable() { + @Override public void run() { r.newCounter("c1", "test dup", 0); } }); } @@ -70,10 +71,12 @@ public class TestMetricsRegistry { r.newGauge("g1", "test add", 1); expectMetricsException("Unsupported add", new Runnable() { + @Override public void run() { r.add("c1", 42); } }); expectMetricsException("Unsupported add", new Runnable() { + @Override public void run() { r.add("g1", 42); } }); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/StaticMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/StaticMapping.java index 379e9401d4..4204e2b624 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/StaticMapping.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/StaticMapping.java @@ -21,10 +21,8 @@ import java.util.ArrayList; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; /** * Implements the {@link DNSToSwitchMapping} via static mappings. 
Used diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/FromCpp.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/FromCpp.java index 2cd2271f43..aeb68ea1de 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/FromCpp.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/FromCpp.java @@ -33,9 +33,11 @@ public FromCpp(String testName) { super(testName); } + @Override protected void setUp() throws Exception { } + @Override protected void tearDown() throws Exception { } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/RecordBench.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/RecordBench.java index 1cba75ed80..816d69ee26 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/RecordBench.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/RecordBench.java @@ -23,8 +23,6 @@ import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; -import java.lang.reflect.Array; -import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.util.Random; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/TestRecordIO.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/TestRecordIO.java index 163ec1b00b..38eb9a0761 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/TestRecordIO.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/TestRecordIO.java @@ -34,9 +34,11 @@ public TestRecordIO(String testName) { super(testName); } + @Override protected void setUp() throws Exception { } + @Override protected void tearDown() throws Exception { } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/TestRecordVersioning.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/TestRecordVersioning.java index 129ba2ced8..5977f03f85 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/TestRecordVersioning.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/TestRecordVersioning.java @@ -35,9 +35,11 @@ public TestRecordVersioning(String testName) { super(testName); } + @Override protected void setUp() throws Exception { } + @Override protected void tearDown() throws Exception { } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/ToCpp.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/ToCpp.java index d3c6385d74..7a3411e1ef 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/ToCpp.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/ToCpp.java @@ -33,9 +33,11 @@ public ToCpp(String testName) { super(testName); } + @Override protected void setUp() throws Exception { } + @Override protected void tearDown() throws Exception { } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java index 3c12047be2..d8138817e1 100644 --- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java @@ -22,7 +22,6 @@ import org.apache.hadoop.security.authentication.server.AuthenticationFilter; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.FilterContainer; -import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -55,6 +54,7 @@ public void testConfiguration() throws Exception { FilterContainer container = Mockito.mock(FilterContainer.class); Mockito.doAnswer( new Answer() { + @Override public Object answer(InvocationOnMock invocationOnMock) throws Throwable { Object[] args = invocationOnMock.getArguments(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCredentials.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCredentials.java index d432623be0..72d02dbc6e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCredentials.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCredentials.java @@ -29,13 +29,10 @@ import java.security.Key; import java.security.NoSuchAlgorithmException; import java.util.HashMap; -import java.util.List; import java.util.ArrayList; import java.util.Map; import java.util.Collection; -import static org.mockito.Mockito.mock; - import javax.crypto.KeyGenerator; import org.apache.hadoop.io.Text; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java index ee7bc29d1e..de35cd2460 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java @@ -39,7 +39,6 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenInfo; import org.junit.Test; -import org.apache.hadoop.ipc.TestSaslRPC; import org.apache.hadoop.ipc.TestSaslRPC.TestTokenSecretManager; import org.apache.hadoop.ipc.TestSaslRPC.TestTokenIdentifier; import org.apache.hadoop.ipc.TestSaslRPC.TestTokenSelector; @@ -113,6 +112,7 @@ public void testCreateProxyUser() throws Exception { PROXY_USER_NAME, realUserUgi); UserGroupInformation curUGI = proxyUserUgi .doAs(new PrivilegedExceptionAction() { + @Override public UserGroupInformation run() throws IOException { return UserGroupInformation.getCurrentUser(); } @@ -131,10 +131,12 @@ public interface TestProtocol extends VersionedProtocol { public class TestImpl implements TestProtocol { + @Override public String aMethod() throws IOException { return UserGroupInformation.getCurrentUser().toString(); } + @Override public long getProtocolVersion(String protocol, long clientVersion) throws IOException { return TestProtocol.versionID; @@ -168,6 +170,7 @@ public void testRealUserSetup() throws IOException { PROXY_USER_NAME, realUserUgi, GROUP_NAMES); String retVal = proxyUserUgi .doAs(new PrivilegedExceptionAction() { + @Override public String run() throws IOException { proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf); @@ -210,6 
+213,7 @@ public void testRealUserAuthorizationSuccess() throws IOException { .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES); String retVal = proxyUserUgi .doAs(new PrivilegedExceptionAction() { + @Override public String run() throws IOException { proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf); @@ -257,6 +261,7 @@ public void testRealUserIPAuthorizationFailure() throws IOException { .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES); String retVal = proxyUserUgi .doAs(new PrivilegedExceptionAction() { + @Override public String run() throws IOException { proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf); @@ -296,6 +301,7 @@ public void testRealUserIPNotSpecified() throws IOException { .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES); String retVal = proxyUserUgi .doAs(new PrivilegedExceptionAction() { + @Override public String run() throws IOException { proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf); @@ -334,6 +340,7 @@ public void testRealUserGroupNotSpecified() throws IOException { .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES); String retVal = proxyUserUgi .doAs(new PrivilegedExceptionAction() { + @Override public String run() throws IOException { proxy = (TestProtocol) RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf); @@ -375,6 +382,7 @@ public void testRealUserGroupAuthorizationFailure() throws IOException { .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES); String retVal = proxyUserUgi .doAs(new PrivilegedExceptionAction() { + @Override public String run() throws IOException { proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java index b284fe0c6a..48627276f8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java @@ -54,6 +54,7 @@ public static class FakeGroupMapping extends ShellBasedUnixGroupsMapping { private static Set allGroups = new HashSet(); private static Set blackList = new HashSet(); + @Override public List getGroups(String user) throws IOException { LOG.info("Getting groups for " + user); if (blackList.contains(user)) { @@ -62,6 +63,7 @@ public List getGroups(String user) throws IOException { return new LinkedList(allGroups); } + @Override public void cacheGroupsRefresh() throws IOException { LOG.info("Cache is being refreshed."); clearBlackList(); @@ -73,6 +75,7 @@ public static void clearBlackList() throws IOException { blackList.clear(); } + @Override public void cacheGroupsAdd(List groups) throws IOException { LOG.info("Adding " + groups + " to groups."); allGroups.addAll(groups); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestJNIGroupsMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestJNIGroupsMapping.java index e8b3a1c918..99c5c2a83f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestJNIGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestJNIGroupsMapping.java @@ -21,16 +21,11 @@ 
import java.util.Arrays; import java.util.List; -import java.util.SortedSet; -import java.util.TreeSet; - -import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.security.GroupMappingServiceProvider; import org.apache.hadoop.security.JniBasedUnixGroupsMapping; import org.apache.hadoop.security.ShellBasedUnixGroupsMapping; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.NativeCodeLoader; -import org.apache.hadoop.util.ReflectionUtils; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java index 4d8224b7cc..ce8ee28207 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java @@ -92,6 +92,7 @@ public void testLogin() throws Exception { UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES); UserGroupInformation curUGI = userGroupInfo.doAs(new PrivilegedExceptionAction(){ + @Override public UserGroupInformation run() throws IOException { return UserGroupInformation.getCurrentUser(); }}); @@ -316,6 +317,7 @@ public void testUGITokens() throws Exception { // ensure that the tokens are passed through doAs Collection> otherSet = ugi.doAs(new PrivilegedExceptionAction>>(){ + @Override public Collection> run() throws IOException { return UserGroupInformation.getCurrentUser().getTokens(); } @@ -342,6 +344,7 @@ public void testTokenIdentifiers() throws Exception { // ensure that the token identifiers are passed through doAs Collection otherSet = ugi .doAs(new PrivilegedExceptionAction>() { + @Override public Collection run() throws IOException { return UserGroupInformation.getCurrentUser().getTokenIdentifiers(); } @@ -358,6 +361,7 @@ public void testUGIAuthMethod() throws Exception { ugi.setAuthenticationMethod(am); Assert.assertEquals(am, ugi.getAuthenticationMethod()); ugi.doAs(new PrivilegedExceptionAction() { + @Override public Object run() throws IOException { Assert.assertEquals(am, UserGroupInformation.getCurrentUser() .getAuthenticationMethod()); @@ -379,6 +383,7 @@ public void testUGIAuthMethodInRealUser() throws Exception { Assert.assertEquals(am, UserGroupInformation .getRealAuthenticationMethod(proxyUgi)); proxyUgi.doAs(new PrivilegedExceptionAction() { + @Override public Object run() throws IOException { Assert.assertEquals(AuthenticationMethod.PROXY, UserGroupInformation .getCurrentUser().getAuthenticationMethod()); @@ -451,6 +456,7 @@ public static void verifyLoginMetrics(long success, int failure) public void testUGIUnderNonHadoopContext() throws Exception { Subject nonHadoopSubject = new Subject(); Subject.doAs(nonHadoopSubject, new PrivilegedExceptionAction() { + @Override public Void run() throws IOException { UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); assertNotNull(ugi); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java index 39ff6808fe..32f1fa1501 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java @@ -22,13 +22,10 @@ import java.util.List; import org.junit.Test; -import org.junit.Before; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; -import static org.junit.Assume.assumeTrue; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java index 6d7d695663..1741eb7477 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java @@ -18,8 +18,6 @@ package org.apache.hadoop.security.token; -import static junit.framework.Assert.assertEquals; - import java.io.*; import java.util.Arrays; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java index c1dd00a4d7..85e227921f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java @@ -19,7 +19,6 @@ package org.apache.hadoop.security.token.delegation; import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; import java.io.DataInput; import java.io.DataInputStream; import java.io.DataOutput; @@ -47,7 +46,6 @@ import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation; import org.apache.hadoop.util.Daemon; -import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.junit.Test; @@ -73,9 +71,11 @@ public Text getKind() { return KIND; } + @Override public void write(DataOutput out) throws IOException { super.write(out); } + @Override public void readFields(DataInput in) throws IOException { super.readFields(in); } @@ -231,6 +231,7 @@ public void testDelegationTokenSecretManager() throws Exception { dtSecretManager, "SomeUser", "JobTracker"); // Fake renewer should not be able to renew shouldThrow(new PrivilegedExceptionAction() { + @Override public Object run() throws Exception { dtSecretManager.renewToken(token, "FakeRenewer"); return null; @@ -259,6 +260,7 @@ public Object run() throws Exception { Thread.sleep(2000); shouldThrow(new PrivilegedExceptionAction() { + @Override public Object run() throws Exception { dtSecretManager.renewToken(token, "JobTracker"); return null; @@ -280,6 +282,7 @@ public void testCancelDelegationToken() throws Exception { generateDelegationToken(dtSecretManager, "SomeUser", "JobTracker"); //Fake renewer should not be able to renew shouldThrow(new PrivilegedExceptionAction() { + @Override public Object run() throws Exception { dtSecretManager.renewToken(token, "FakeCanceller"); return null; @@ -287,6 +290,7 @@ public Object run() throws Exception { }, AccessControlException.class); 
dtSecretManager.cancelToken(token, "JobTracker"); shouldThrow(new PrivilegedExceptionAction() { + @Override public Object run() throws Exception { dtSecretManager.renewToken(token, "JobTracker"); return null; @@ -379,6 +383,7 @@ public void testParallelDelegationTokenCreation() throws Exception { final int numTokensPerThread = 100; class tokenIssuerThread implements Runnable { + @Override public void run() { for(int i =0;i T execute(String user, final Configuration conf, final FileSystemExec getAuthority()); UserGroupInformation ugi = getUGI(user); return ugi.doAs(new PrivilegedExceptionAction() { + @Override public T run() throws Exception { FileSystem fs = createFileSystem(conf); Instrumentation instrumentation = getServer().get(Instrumentation.class); @@ -362,6 +363,7 @@ public FileSystem createFileSystemInternal(String user, final Configuration conf new URI(conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)).getAuthority()); UserGroupInformation ugi = getUGI(user); return ugi.doAs(new PrivilegedExceptionAction() { + @Override public FileSystem run() throws Exception { return createFileSystem(conf); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/instrumentation/InstrumentationService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/instrumentation/InstrumentationService.java index 9a36955d6a..ee4455c999 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/instrumentation/InstrumentationService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/instrumentation/InstrumentationService.java @@ -85,16 +85,19 @@ public void init() throws ServiceException { all.put("samplers", (Map) samplers); jvmVariables.put("free.memory", new VariableHolder(new Instrumentation.Variable() { + @Override public Long getValue() { return Runtime.getRuntime().freeMemory(); } })); jvmVariables.put("max.memory", new VariableHolder(new Instrumentation.Variable() { + @Override public Long getValue() { return Runtime.getRuntime().maxMemory(); } })); jvmVariables.put("total.memory", new VariableHolder(new Instrumentation.Variable() { + @Override public Long getValue() { return Runtime.getRuntime().totalMemory(); } @@ -162,6 +165,7 @@ static class Cron implements Instrumentation.Cron { long own; long total; + @Override public Cron start() { if (total != 0) { throw new IllegalStateException("Cron already used"); @@ -175,6 +179,7 @@ public Cron start() { return this; } + @Override public Cron stop() { if (total != 0) { throw new IllegalStateException("Cron already used"); diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/scheduler/SchedulerService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/scheduler/SchedulerService.java index f4e5bafece..2da7f24ec3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/scheduler/SchedulerService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/scheduler/SchedulerService.java @@ -96,6 +96,7 @@ public void schedule(final Callable callable, long delay, long interval, Time LOG.debug("Scheduling callable [{}], interval [{}] seconds, delay [{}] in [{}]", new Object[]{callable, delay, interval, unit}); Runnable r = new Runnable() { + @Override public void run() { String instrName = callable.getClass().getSimpleName(); Instrumentation 
instr = getServer().get(Instrumentation.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java index c56f6e4968..b040054267 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java @@ -147,6 +147,7 @@ static String getDir(String name, String dirType, String defaultDir) { * * @param event servelt context event. */ + @Override public void contextInitialized(ServletContextEvent event) { try { init(); @@ -194,6 +195,7 @@ protected InetSocketAddress resolveAuthority() throws ServerException { * * @param event servelt context event. */ + @Override public void contextDestroyed(ServletContextEvent event) { destroy(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java index bce8c3b0d9..f126032976 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java @@ -29,6 +29,7 @@ public BooleanParam(String name, Boolean defaultValue) { super(name, defaultValue); } + @Override protected Boolean parse(String str) throws Exception { if (str.equalsIgnoreCase("true")) { return true; diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java index b0e1173572..bc2c4a54c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java @@ -27,6 +27,7 @@ public ByteParam(String name, Byte defaultValue) { super(name, defaultValue); } + @Override protected Byte parse(String str) throws Exception { return Byte.parseByte(str); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java index d76db629b2..8baef67e8c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java @@ -32,6 +32,7 @@ public EnumParam(String name, Class e, E defaultValue) { klass = e; } + @Override protected E parse(String str) throws Exception { return Enum.valueOf(klass, str.toUpperCase()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java index faa99a440e..b7b08f6a9b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java @@ -27,6 +27,7 @@ public IntegerParam(String name, Integer defaultValue) { super(name, defaultValue); } + @Override protected Integer parse(String str) throws Exception { return 
Integer.parseInt(str); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java index c2399bf76b..11bf082060 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java @@ -27,6 +27,7 @@ public LongParam(String name, Long defaultValue) { super(name, defaultValue); } + @Override protected Long parse(String str) throws Exception { return Long.parseLong(str); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java index f73c52fd5a..8af5373a3e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java @@ -55,6 +55,7 @@ public T value() { protected abstract T parse(String str) throws Exception; + @Override public String toString() { return (value != null) ? value.toString() : "NULL"; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java index 7986e72bdb..7d700c1744 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java @@ -34,6 +34,7 @@ public ShortParam(String name, Short defaultValue) { this(name, defaultValue, 10); } + @Override protected Short parse(String str) throws Exception { return Short.parseShort(str, radix); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java index 85bee1c901..1695eb3aa2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java @@ -36,6 +36,7 @@ public StringParam(String name, String defaultValue, Pattern pattern) { parseParam(defaultValue); } + @Override public String parseParam(String str) { try { if (str != null) { @@ -52,6 +53,7 @@ public String parseParam(String str) { return value; } + @Override protected String parse(String str) throws Exception { if (pattern != null) { if (!pattern.matcher(str).matches()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystemLocalFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystemLocalFileSystem.java index 87b1420996..0cb0cc64b3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystemLocalFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystemLocalFileSystem.java @@ -47,14 +47,17 @@ public TestHttpFSFileSystemLocalFileSystem(Operation operation) { super(operation); } + @Override protected Path getProxiedFSTestDir() { return addPrefix(new 
Path(TestDirHelper.getTestDir().getAbsolutePath())); } + @Override protected String getProxiedFSURI() { return "file:///"; } + @Override protected Configuration getProxiedFSConf() { Configuration conf = new Configuration(false); conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, getProxiedFSURI()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSWithHttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSWithHttpFSFileSystem.java index fa0a7555a7..b211e9a466 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSWithHttpFSFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSWithHttpFSFileSystem.java @@ -32,19 +32,23 @@ public TestHttpFSWithHttpFSFileSystem(Operation operation) { super(operation); } + @Override protected Class getFileSystemClass() { return HttpFSFileSystem.class; } + @Override protected Path getProxiedFSTestDir() { return TestHdfsHelper.getHdfsTestDir(); } + @Override protected String getProxiedFSURI() { return TestHdfsHelper.getHdfsConf().get( CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY); } + @Override protected Configuration getProxiedFSConf() { return TestHdfsHelper.getHdfsConf(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSKerberosAuthenticationHandler.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSKerberosAuthenticationHandler.java index a32671854c..db4cdeeadb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSKerberosAuthenticationHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSKerberosAuthenticationHandler.java @@ -34,7 +34,6 @@ import org.apache.hadoop.test.HFSTestCase; import org.apache.hadoop.test.TestDir; import org.apache.hadoop.test.TestDirHelper; -import org.json.simple.JSONArray; import org.json.simple.JSONObject; import org.json.simple.parser.JSONParser; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHFSTestCase.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHFSTestCase.java index f4996de542..eb2cdc6142 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHFSTestCase.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHFSTestCase.java @@ -79,6 +79,7 @@ public void testDirAnnotation() throws Exception { public void waitFor() { long start = Time.now(); long waited = waitFor(1000, new Predicate() { + @Override public boolean evaluate() throws Exception { return true; } @@ -93,6 +94,7 @@ public void waitForTimeOutRatio1() { setWaitForRatio(1); long start = Time.now(); long waited = waitFor(200, new Predicate() { + @Override public boolean evaluate() throws Exception { return false; } @@ -107,6 +109,7 @@ public void waitForTimeOutRatio2() { setWaitForRatio(2); long start = Time.now(); long waited = waitFor(200, new Predicate() { + @Override public boolean evaluate() throws Exception { return false; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHTestCase.java 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHTestCase.java index 10c798f3fa..74d34ec80e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHTestCase.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHTestCase.java @@ -64,6 +64,7 @@ public void testDirAnnotation() throws Exception { public void waitFor() { long start = Time.now(); long waited = waitFor(1000, new Predicate() { + @Override public boolean evaluate() throws Exception { return true; } @@ -78,6 +79,7 @@ public void waitForTimeOutRatio1() { setWaitForRatio(1); long start = Time.now(); long waited = waitFor(200, new Predicate() { + @Override public boolean evaluate() throws Exception { return false; } @@ -92,6 +94,7 @@ public void waitForTimeOutRatio2() { setWaitForRatio(2); long start = Time.now(); long waited = waitFor(200, new Predicate() { + @Override public boolean evaluate() throws Exception { return false; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java index 2afd7d35a4..26d253fecb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java @@ -31,6 +31,7 @@ public class TestHdfsHelper extends TestDirHelper { + @Override @Test public void dummy() { } diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index ba2a8b7564..0f4c5263d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -126,6 +126,9 @@ Trunk (unreleased changes) HDFS-3819. Should check whether invalidate work percentage default value is not greater than 1.0f. (Jing Zhao via jitendra) + HDFS-3844. Add @Override and remove {@inheritdoc} and unnecessary + imports. 
(Jing Zhao via suresh) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java index 2386c84130..222d454a70 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java @@ -312,9 +312,6 @@ public FileStatus[] listStatus(Path f) return listing.toArray(new FileStatus[listing.size()]); } - /** - * {@inheritDoc} - */ @Override public RemoteIterator listCorruptFileBlocks(Path path) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java index 4150c5c6fe..438d56e52f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java @@ -23,7 +23,6 @@ import java.net.HttpURLConnection; import java.net.InetSocketAddress; import java.net.URI; -import java.net.URISyntaxException; import java.net.URL; import java.security.KeyStore; import java.security.cert.X509Certificate; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java index 39a9b3086a..c24a59b87d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java @@ -23,7 +23,6 @@ import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; -import java.io.InputStream; import java.io.OutputStream; import java.net.InetSocketAddress; import java.net.Socket; @@ -35,7 +34,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; -import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor; import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; import org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver; @@ -47,8 +45,6 @@ import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.SocketInputWrapper; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.DataChecksum; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java index f3575c4caa..ac6adfefb6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java @@ -118,7 +118,6 @@ static boolean isEqual(Object a, Object b) { return a == 
null ? b == null : a.equals(b); } - /** {@inheritDoc} */ @Override public boolean equals(Object obj) { if (obj == this) { @@ -135,7 +134,6 @@ && isEqual(this.blockPoolId, that.blockPoolId) return false; } - /** {@inheritDoc} */ @Override public int hashCode() { return (int) expiryDate ^ keyId ^ (int) blockId ^ modes.hashCode() diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 1558b4a8f5..01ee2a1222 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -59,7 +59,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; -import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.hdfs.server.namenode.FSClusterStats; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.Namesystem; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java index d7c4d8a305..00b0b0723b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java @@ -101,9 +101,6 @@ protected DatanodeDescriptor chooseLocalNode( blocksize, maxNodesPerRack, results); } - /** - * {@inheritDoc} - */ @Override protected void adjustExcludedNodes(HashMap excludedNodes, Node chosenNode) { @@ -121,9 +118,6 @@ private void addNodeGroupToExcludedNodes(HashMap excludedNodes, } } - /** - * {@inheritDoc} - */ @Override protected DatanodeDescriptor chooseLocalRack( DatanodeDescriptor localMachine, @@ -172,9 +166,6 @@ protected DatanodeDescriptor chooseLocalRack( } } - /** - * {@inheritDoc} - */ @Override protected void chooseRemoteRack(int numOfReplicas, DatanodeDescriptor localMachine, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 2cab5e207f..f7594284a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -50,7 +50,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair; -import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.Namesystem; import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java index 8f921bde1f..73926010a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.DatanodeID; -import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.hdfs.server.namenode.Namesystem; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Time; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java index b7da116048..860d1d261f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java @@ -22,11 +22,7 @@ import java.util.Queue; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.blockmanagement.PendingDataNodeMessages.ReportedBlockInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; -import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; - -import com.google.common.collect.Iterators; import com.google.common.collect.Lists; import com.google.common.collect.Maps; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java index 60a1216d12..831f3430be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java @@ -44,7 +44,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.BlockReader; import org.apache.hadoop.hdfs.BlockReaderFactory; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; @@ -60,14 +59,12 @@ import org.apache.hadoop.hdfs.web.resources.DoAsParam; import org.apache.hadoop.hdfs.web.resources.UserParam; import org.apache.hadoop.http.HtmlQuoting; -import org.apache.hadoop.io.Text; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.authentication.util.KerberosName; -import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.VersionInfo; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java index ca596a2b0e..f89fbde121 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java @@ -859,6 +859,7 @@ public interface FormatConfirmable { * @return a string representation of the formattable item, suitable * for display to the user inside a prompt */ + @Override public String toString(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java index 2d1ff6437b..4393ec7bca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java @@ -16,8 +16,6 @@ */ package org.apache.hadoop.hdfs.server.datanode; -import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION; - import java.net.InetSocketAddress; import java.net.ServerSocket; import java.nio.channels.ServerSocketChannel; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java index de80f80cf2..3816dc10d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java @@ -22,9 +22,6 @@ import java.io.Closeable; import java.io.IOException; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - /** * A generic abstract class to support reading edits log data from * persistent storage. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 2b4d3cba91..ad7f71cfe9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -29,12 +29,10 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LayoutVersion; -import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.common.Storage; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index 1a2b05a793..7dac687fc6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -33,7 +33,6 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index 66679b05fb..e1882d9481 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -43,7 +43,6 @@ import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; -import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.io.Text; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java index d6453fa8b5..a8df0f706c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hdfs.protocol.Block; import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; -import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.ShortWritable; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java index 44b0437d13..7090f455d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java @@ -18,13 +18,8 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT; - import java.io.IOException; import java.net.InetSocketAddress; -import java.security.PrivilegedExceptionAction; import java.util.HashMap; import java.util.Map; @@ -34,7 +29,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; import org.apache.hadoop.hdfs.web.AuthFilter; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java index 7a30869290..eb6a8ea1c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java @@ -20,13 +20,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.Comparator; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; - -import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hdfs.protocol.HdfsConstants; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java index e12ce698f3..6897e353ff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java @@ -75,7 +75,7 @@ T get(int i) { return t; } - /** {@inheritDoc} */ + @Override public String toString() { return "max=" + max + ",\n t2i=" + t2i + ",\n i2t=" + i2t; } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java index 8030f2817e..3fd1dc26a0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdfs.tools.offlineEditsViewer; -import java.io.FileWriter; import java.io.IOException; import java.io.OutputStream; import java.io.PrintStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java index 2aade9eb14..0c8ac6353c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java @@ -31,13 +31,11 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization; import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement; -import org.apache.hadoop.hdfs.util.XMLUtils; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.io.compress.CompressionCodecFactory; import org.apache.hadoop.security.token.delegation.DelegationKey; -import org.xml.sax.helpers.AttributesImpl; /** * ImageLoaderCurrent processes Hadoop FSImage files and walks over diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/CyclicIteration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/CyclicIteration.java index 4685a2e6ae..6045615edc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/CyclicIteration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/CyclicIteration.java @@ -53,7 +53,6 @@ public CyclicIteration(NavigableMap navigablemap, K startingkey) { } } - /** {@inheritDoc} */ @Override public Iterator> iterator() { return new CyclicIterator(); @@ -89,13 +88,11 @@ private Map.Entry nextEntry() { return i.next(); } - /** {@inheritDoc} */ @Override public boolean hasNext() { return hasnext; } - /** {@inheritDoc} */ @Override public Map.Entry next() { if (!hasnext) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fi/DataTransferTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fi/DataTransferTestUtil.java index 4724595d4a..893e0b7cb6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fi/DataTransferTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fi/DataTransferTestUtil.java @@ -271,7 +271,6 @@ public void run(DatanodeID id) throws IOException { } } - /** {@inheritDoc} */ @Override public String toString() { return error + " " + super.toString(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/PipelinesTestUtil.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/PipelinesTestUtil.java index 8f4f9c2be6..888fadf5dd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/PipelinesTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/PipelinesTestUtil.java @@ -51,9 +51,6 @@ public ReceivedCheckAction(String name) { this.name = name; } - /** - * {@inheritDoc} - */ @Override public void run(NodeBytes nb) throws IOException { synchronized (rcv) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java index bea29f9c67..cec0c594a7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java @@ -19,7 +19,6 @@ import static org.junit.Assert.*; -import java.io.DataOutputStream; import java.io.FileNotFoundException; import java.io.IOException; @@ -29,9 +28,7 @@ import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; -import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java index a941ae424d..43bd7a4153 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java @@ -27,7 +27,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryNameNodeUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryNameNodeUpgrade.java index 6119584c0d..f3925c963c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryNameNodeUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryNameNodeUpgrade.java @@ -23,10 +23,7 @@ import org.junit.Test; import org.junit.Before; -import org.junit.After; - import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FileUtil; @@ -35,9 +32,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import java.util.Properties; -import java.io.FileReader; -import java.io.FileWriter; import org.junit.Assert; import org.apache.hadoop.test.GenericTestUtils; From ffd2e01604be814fa3db1dded7cd7cff26a79b1e Mon Sep 17 00:00:00 2001 From: Siddharth Seth Date: Sat, 25 Aug 2012 02:18:49 +0000 
Subject: [PATCH 04/62] YARN-39. RM-NM secret-keys should be randomly generated and rolled every so often. (Contributed by Vinod Kumar Vavilapalli and Siddharth Seth) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1377180 13f79535-47bb-0310-9956-ffa450edef68 --- .../v2/app/TestRMContainerAllocator.java | 4 +- hadoop-yarn-project/CHANGES.txt | 3 + .../hadoop/yarn/conf/YarnConfiguration.java | 14 +- .../security/ContainerTokenIdentifier.java | 10 +- .../src/main/resources/yarn-default.xml | 11 + .../server/api/records/HeartbeatResponse.java | 5 +- .../yarn/server/api/records/MasterKey.java | 33 +++ .../api/records/RegistrationResponse.java | 11 +- .../impl/pb/HeartbeatResponsePBImpl.java | 38 +++- .../api/records/impl/pb/MasterKeyPBImpl.java | 102 +++++++++ .../impl/pb/RegistrationResponsePBImpl.java | 39 ++-- .../BaseContainerTokenSecretManager.java | 202 ++++++++++++++++++ .../security/ContainerTokenSecretManager.java | 118 ---------- .../proto/yarn_server_common_protos.proto | 14 +- .../yarn/server/nodemanager/Context.java | 3 + .../yarn/server/nodemanager/NodeManager.java | 35 +-- .../server/nodemanager/NodeStatusUpdater.java | 2 - .../nodemanager/NodeStatusUpdaterImpl.java | 44 ++-- .../ContainerManagerImpl.java | 141 +++++++----- .../application/ApplicationImpl.java | 8 +- .../NMContainerTokenSecretManager.java | 189 ++++++++++++++++ .../nodemanager/DummyContainerManager.java | 6 +- .../server/nodemanager/TestEventFlow.java | 19 +- .../server/nodemanager/TestNMAuditLogger.java | 18 +- .../nodemanager/TestNodeStatusUpdater.java | 47 ++-- .../BaseContainerManagerTest.java | 15 +- .../TestContainerManager.java | 11 +- .../application/TestApplication.java | 4 +- .../nodemanager/webapp/TestNMWebServer.java | 4 +- .../nodemanager/webapp/TestNMWebServices.java | 2 +- .../webapp/TestNMWebServicesApps.java | 2 +- .../webapp/TestNMWebServicesContainers.java | 2 +- .../server/resourcemanager/AdminService.java | 5 +- .../server/resourcemanager/RMContext.java | 3 + .../server/resourcemanager/RMContextImpl.java | 11 +- .../resourcemanager/ResourceManager.java | 26 +-- .../ResourceTrackerService.java | 52 +++-- .../server/resourcemanager/rmnode/RMNode.java | 5 +- .../resourcemanager/rmnode/RMNodeImpl.java | 21 +- .../rmnode/RMNodeStatusEvent.java | 9 +- .../scheduler/ResourceScheduler.java | 5 +- .../scheduler/capacity/CapacityScheduler.java | 15 +- .../capacity/CapacitySchedulerContext.java | 4 +- .../scheduler/capacity/LeafQueue.java | 4 +- .../scheduler/fair/AppSchedulable.java | 4 +- .../scheduler/fair/FairScheduler.java | 15 +- .../scheduler/fifo/FifoScheduler.java | 13 +- .../RMContainerTokenSecretManager.java | 153 +++++++++++++ .../server/resourcemanager/MockNodes.java | 6 + .../resourcemanager/TestAppManager.java | 2 +- .../resourcemanager/TestFifoScheduler.java | 5 +- .../resourcemanager/TestRMAuditLogger.java | 17 +- .../TestRMNodeTransitions.java | 4 +- .../applicationsmanager/TestAMRestart.java | 2 +- .../TestSchedulerNegotiator.java | 2 +- .../resourcetracker/TestNMExpiry.java | 11 +- .../TestRMNMRPCResponseId.java | 9 +- .../rmapp/TestRMAppTransitions.java | 9 +- .../attempt/TestRMAppAttemptTransitions.java | 12 +- .../capacity/TestCapacityScheduler.java | 13 +- .../scheduler/capacity/TestLeafQueue.java | 2 +- .../scheduler/capacity/TestQueueParsing.java | 13 +- .../scheduler/capacity/TestUtils.java | 5 +- .../scheduler/fair/TestFairScheduler.java | 20 +- .../scheduler/fifo/TestFifoScheduler.java | 4 +- .../resourcemanager/webapp/TestRMWebApp.java | 8 +- 
.../webapp/TestRMWebServicesNodes.java | 4 +- .../hadoop/yarn/server/MiniYARNCluster.java | 7 +- .../server/TestContainerManagerSecurity.java | 26 ++- 69 files changed, 1208 insertions(+), 474 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/MasterKey.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/MasterKeyPBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/ContainerTokenSecretManager.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java index e252c6f6dc..c1146e0989 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java @@ -93,7 +93,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; import org.apache.hadoop.yarn.util.BuilderUtils; import org.junit.After; import org.junit.Test; @@ -1099,8 +1098,7 @@ public MyFifoScheduler(RMContext rmContext) { super(); try { Configuration conf = new Configuration(); - reinitialize(conf, new ContainerTokenSecretManager(conf), - rmContext); + reinitialize(conf, rmContext); } catch (IOException ie) { LOG.info("add application failed with ", ie); assert (false); diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index bad58d6006..ca1810ed50 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -59,3 +59,6 @@ Release 0.23.3 - Unreleased refreshing of queues (Arun Murthy via tgraves) MAPREDUCE-4323. NM leaks filesystems (Jason Lowe via jeagles) + + YARN-39. RM-NM secret-keys should be randomly generated and rolled every + so often. 
(vinodkv and sseth via sseth) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 816df428d8..95a1a6ca82 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -18,16 +18,16 @@ package org.apache.hadoop.yarn.conf; -import com.google.common.base.Joiner; -import com.google.common.base.Splitter; - import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; -import java.util.Iterator; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.net.NetUtils; +import com.google.common.base.Joiner; +import com.google.common.base.Splitter; + public class YarnConfiguration extends Configuration { private static final Splitter ADDR_SPLITTER = Splitter.on(':').trimResults(); private static final Joiner JOINER = Joiner.on(""); @@ -262,6 +262,12 @@ public class YarnConfiguration extends Configuration { public static final long DEFAULT_RM_APP_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS = 24 * 60 * 60; + public static final String RM_CONTAINER_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS = + RM_PREFIX + "container-tokens.master-key-rolling-interval-secs"; + + public static final long DEFAULT_RM_CONTAINER_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS = + 24 * 60 * 60; + //////////////////////////////// // Node Manager Configs //////////////////////////////// diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java index eb8857655a..e58f584e82 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java @@ -50,13 +50,15 @@ public class ContainerTokenIdentifier extends TokenIdentifier { private String nmHostAddr; private Resource resource; private long expiryTimeStamp; + private int masterKeyId; public ContainerTokenIdentifier(ContainerId containerID, String hostName, - Resource r, long expiryTimeStamp) { + Resource r, long expiryTimeStamp, int masterKeyId) { this.containerId = containerID; this.nmHostAddr = hostName; this.resource = r; this.expiryTimeStamp = expiryTimeStamp; + this.masterKeyId = masterKeyId; } /** @@ -81,6 +83,10 @@ public long getExpiryTimeStamp() { return this.expiryTimeStamp; } + public int getMasterKeyId() { + return this.masterKeyId; + } + @Override public void write(DataOutput out) throws IOException { LOG.debug("Writing ContainerTokenIdentifier to RPC layer: " + this); @@ -94,6 +100,7 @@ public void write(DataOutput out) throws IOException { out.writeUTF(this.nmHostAddr); out.writeInt(this.resource.getMemory()); out.writeLong(this.expiryTimeStamp); + out.writeInt(this.masterKeyId); } @Override @@ -107,6 +114,7 @@ public void readFields(DataInput in) throws IOException { this.nmHostAddr = in.readUTF(); this.resource = BuilderUtils.newResource(in.readInt()); this.expiryTimeStamp = in.readLong(); + this.masterKeyId = in.readInt(); } @Override 
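The masterKeyId field added to ContainerTokenIdentifier above is what lets a NodeManager pick the correct secret when a token arrives after a key roll-over. The stand-alone sketch below shows that lookup idea only; the knownKeys map and the verify() helper are illustrative assumptions for this example, not code from this patch.

    // Illustrative only: why ContainerTokenIdentifier carries a masterKeyId.
    import java.security.MessageDigest;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import javax.crypto.Mac;
    import javax.crypto.SecretKey;
    import javax.crypto.spec.SecretKeySpec;

    public class TokenValidationSketch {
      // keyId -> secret; a real secret manager would hold the current and the
      // previous master key here so tokens survive a roll-over.
      private final Map<Integer, SecretKey> knownKeys =
          new ConcurrentHashMap<Integer, SecretKey>();

      public void addKey(int keyId, byte[] keyBytes) {
        knownKeys.put(keyId, new SecretKeySpec(keyBytes, "HmacSHA1"));
      }

      // Recompute the HMAC over the serialized identifier with the key named
      // by the identifier's masterKeyId and compare it to the token password.
      public boolean verify(int masterKeyId, byte[] identifierBytes,
          byte[] password) throws Exception {
        SecretKey key = knownKeys.get(masterKeyId);
        if (key == null) {
          return false; // signed with a key this NodeManager no longer knows
        }
        Mac mac = Mac.getInstance("HmacSHA1");
        mac.init(key);
        byte[] expected = mac.doFinal(identifierBytes);
        return MessageDigest.isEqual(expected, password);
      }
    }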
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 0408ee1644..8df3d22cc8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -239,6 +239,17 @@ 86400 + + Interval for the roll over for the master key used to generate + container tokens. It is expected to be much greater than + yarn.nm.liveness-monitor.expiry-interval-ms and + yarn.rm.container-allocation.expiry-interval-ms. Otherwise the + behavior is undefined. + + yarn.resourcemanager.container-tokens.master-key-rolling-interval-secs + 86400 + + The address of the container manager in the NM. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/HeartbeatResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/HeartbeatResponse.java index 50e45d49f8..4536934054 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/HeartbeatResponse.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/HeartbeatResponse.java @@ -36,7 +36,10 @@ public interface HeartbeatResponse { void setResponseId(int responseId); void setNodeAction(NodeAction action); - + + MasterKey getMasterKey(); + void setMasterKey(MasterKey secretKey); + void addAllContainersToCleanup(List containers); void addContainerToCleanup(ContainerId container); void removeContainerToCleanup(int index); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/MasterKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/MasterKey.java new file mode 100644 index 0000000000..d951e3dc16 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/MasterKey.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.api.records; + +import java.nio.ByteBuffer; + +public interface MasterKey { + + int getKeyId(); + + void setKeyId(int keyId); + + ByteBuffer getBytes(); + + void setBytes(ByteBuffer bytes); + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/RegistrationResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/RegistrationResponse.java index f4bb31a2c6..2d4ee65255 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/RegistrationResponse.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/RegistrationResponse.java @@ -17,14 +17,13 @@ */ package org.apache.hadoop.yarn.server.api.records; -import java.nio.ByteBuffer; - public interface RegistrationResponse { - public abstract ByteBuffer getSecretKey(); + + MasterKey getMasterKey(); - public abstract void setSecretKey(ByteBuffer secretKey); + void setMasterKey(MasterKey secretKey); - public abstract NodeAction getNodeAction(); + NodeAction getNodeAction(); - public abstract void setNodeAction(NodeAction nodeAction); + void setNodeAction(NodeAction nodeAction); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/HeartbeatResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/HeartbeatResponsePBImpl.java index 7cf7ac8bcb..8a7d890aeb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/HeartbeatResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/HeartbeatResponsePBImpl.java @@ -32,8 +32,10 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto; import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.HeartbeatResponseProto; import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.HeartbeatResponseProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto; import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeActionProto; import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse; +import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.api.records.NodeAction; public class HeartbeatResponsePBImpl extends @@ -43,9 +45,8 @@ public class HeartbeatResponsePBImpl extends boolean viaProto = false; private List containersToCleanup = null; - private List applicationsToCleanup = null; - + private MasterKey masterKey = null; public HeartbeatResponsePBImpl() { builder = HeartbeatResponseProto.newBuilder(); @@ -71,6 +72,9 @@ private void mergeLocalToBuilder() { if (this.applicationsToCleanup != null) { addApplicationsToCleanupToProto(); } + if (this.masterKey != null) { + builder.setMasterKey(convertToProtoFormat(this.masterKey)); + } } private void mergeLocalToProto() { @@ -100,6 +104,28 @@ public void setResponseId(int responseId) { maybeInitBuilder(); builder.setResponseId((responseId)); } + + @Override + public 
MasterKey getMasterKey() { + HeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder; + if (this.masterKey != null) { + return this.masterKey; + } + if (!p.hasMasterKey()) { + return null; + } + this.masterKey = convertFromProtoFormat(p.getMasterKey()); + return this.masterKey; + } + + @Override + public void setMasterKey(MasterKey masterKey) { + maybeInitBuilder(); + if (masterKey == null) + builder.clearMasterKey(); + this.masterKey = masterKey; + } + @Override public NodeAction getNodeAction() { HeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder; @@ -313,4 +339,12 @@ private NodeAction convertFromProtoFormat(NodeActionProto p) { private NodeActionProto convertToProtoFormat(NodeAction t) { return NodeActionProto.valueOf(t.name()); } + + private MasterKeyPBImpl convertFromProtoFormat(MasterKeyProto p) { + return new MasterKeyPBImpl(p); + } + + private MasterKeyProto convertToProtoFormat(MasterKey t) { + return ((MasterKeyPBImpl)t).getProto(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/MasterKeyPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/MasterKeyPBImpl.java new file mode 100644 index 0000000000..df7e94ac8e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/MasterKeyPBImpl.java @@ -0,0 +1,102 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.records.impl.pb; + +import java.nio.ByteBuffer; + +import org.apache.hadoop.yarn.api.records.ProtoBase; +import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto; +import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProtoOrBuilder; +import org.apache.hadoop.yarn.server.api.records.MasterKey; + +public class MasterKeyPBImpl extends ProtoBase implements + MasterKey { + MasterKeyProto proto = MasterKeyProto.getDefaultInstance(); + MasterKeyProto.Builder builder = null; + boolean viaProto = false; + + public MasterKeyPBImpl() { + builder = MasterKeyProto.newBuilder(); + } + + public MasterKeyPBImpl(MasterKeyProto proto) { + this.proto = proto; + viaProto = true; + } + + public synchronized MasterKeyProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private synchronized void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = MasterKeyProto.newBuilder(proto); + } + viaProto = false; + } + + @Override + public synchronized int getKeyId() { + MasterKeyProtoOrBuilder p = viaProto ? 
proto : builder; + return (p.getKeyId()); + } + + @Override + public synchronized void setKeyId(int id) { + maybeInitBuilder(); + builder.setKeyId((id)); + } + + @Override + public synchronized ByteBuffer getBytes() { + MasterKeyProtoOrBuilder p = viaProto ? proto : builder; + return convertFromProtoFormat(p.getBytes()); + } + + @Override + public synchronized void setBytes(ByteBuffer bytes) { + maybeInitBuilder(); + builder.setBytes(convertToProtoFormat(bytes)); + } + + @Override + public int hashCode() { + return getKeyId(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (!(obj instanceof MasterKey)) { + return false; + } + MasterKey other = (MasterKey) obj; + if (this.getKeyId() != other.getKeyId()) { + return false; + } + if (!this.getBytes().equals(other.getBytes())) { + return false; + } + return true; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/RegistrationResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/RegistrationResponsePBImpl.java index 21d708b14b..1619e5432f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/RegistrationResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/RegistrationResponsePBImpl.java @@ -19,12 +19,12 @@ package org.apache.hadoop.yarn.server.api.records.impl.pb; -import java.nio.ByteBuffer; - import org.apache.hadoop.yarn.api.records.ProtoBase; +import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto; import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeActionProto; import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.RegistrationResponseProto; import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.RegistrationResponseProtoOrBuilder; +import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.api.records.NodeAction; import org.apache.hadoop.yarn.server.api.records.RegistrationResponse; @@ -34,7 +34,7 @@ public class RegistrationResponsePBImpl extends RegistrationResponseProto.Builder builder = null; boolean viaProto = false; - private ByteBuffer secretKey = null; + private MasterKey masterKey = null; public RegistrationResponsePBImpl() { builder = RegistrationResponseProto.newBuilder(); @@ -54,8 +54,8 @@ public RegistrationResponseProto getProto() { } private void mergeLocalToBuilder() { - if (this.secretKey != null) { - builder.setSecretKey(convertToProtoFormat(this.secretKey)); + if (this.masterKey != null) { + builder.setMasterKey(convertToProtoFormat(this.masterKey)); } } @@ -76,26 +76,26 @@ private void maybeInitBuilder() { } @Override - public ByteBuffer getSecretKey() { + public MasterKey getMasterKey() { RegistrationResponseProtoOrBuilder p = viaProto ? 
proto : builder; - if (this.secretKey != null) { - return this.secretKey; + if (this.masterKey != null) { + return this.masterKey; } - if (!p.hasSecretKey()) { + if (!p.hasMasterKey()) { return null; } - this.secretKey = convertFromProtoFormat(p.getSecretKey()); - return this.secretKey; + this.masterKey = convertFromProtoFormat(p.getMasterKey()); + return this.masterKey; } @Override - public void setSecretKey(ByteBuffer secretKey) { + public void setMasterKey(MasterKey masterKey) { maybeInitBuilder(); - if (secretKey == null) - builder.clearSecretKey(); - this.secretKey = secretKey; + if (masterKey == null) + builder.clearMasterKey(); + this.masterKey = masterKey; } - + @Override public NodeAction getNodeAction() { RegistrationResponseProtoOrBuilder p = viaProto ? proto : builder; @@ -123,4 +123,11 @@ private NodeActionProto convertToProtoFormat(NodeAction t) { return NodeActionProto.valueOf(t.name()); } + private MasterKeyPBImpl convertFromProtoFormat(MasterKeyProto p) { + return new MasterKeyPBImpl(p); + } + + private MasterKeyProto convertToProtoFormat(MasterKey t) { + return ((MasterKeyPBImpl)t).getProto(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java new file mode 100644 index 0000000000..16f4b6f234 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java @@ -0,0 +1,202 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +package org.apache.hadoop.yarn.server.security; + +import java.nio.ByteBuffer; +import java.security.SecureRandom; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import javax.crypto.SecretKey; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.token.SecretManager; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerToken; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; +import org.apache.hadoop.yarn.server.api.records.MasterKey; +import org.apache.hadoop.yarn.util.BuilderUtils; +import org.apache.hadoop.yarn.util.Records; + +/** + * SecretManager for ContainerTokens. Extended by both RM and NM and hence is + * present in yarn-server-common package. + * + */ +public class BaseContainerTokenSecretManager extends + SecretManager { + + private static Log LOG = LogFactory + .getLog(BaseContainerTokenSecretManager.class); + + private int serialNo = new SecureRandom().nextInt(); + + protected final ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); + protected final Lock readLock = readWriteLock.readLock(); + protected final Lock writeLock = readWriteLock.writeLock(); + + /** + * THE masterKey. ResourceManager should persist this and recover it on + * restart instead of generating a new key. The NodeManagers get it from the + * ResourceManager and use it for validating container-tokens. + */ + protected MasterKeyData currentMasterKey; + + protected final class MasterKeyData { + + private final MasterKey masterKeyRecord; + // Underlying secret-key also stored to avoid repetitive encoding and + // decoding the masterKeyRecord bytes. + private final SecretKey generatedSecretKey; + + private MasterKeyData() { + this.masterKeyRecord = Records.newRecord(MasterKey.class); + this.masterKeyRecord.setKeyId(serialNo++); + this.generatedSecretKey = generateSecret(); + this.masterKeyRecord.setBytes(ByteBuffer.wrap(generatedSecretKey + .getEncoded())); + } + + public MasterKeyData(MasterKey masterKeyRecord) { + this.masterKeyRecord = masterKeyRecord; + this.generatedSecretKey = + SecretManager.createSecretKey(this.masterKeyRecord.getBytes().array() + .clone()); + } + + public MasterKey getMasterKey() { + return this.masterKeyRecord; + } + + private SecretKey getSecretKey() { + return this.generatedSecretKey; + } + } + + protected final long containerTokenExpiryInterval; + + public BaseContainerTokenSecretManager(Configuration conf) { + this.containerTokenExpiryInterval = + conf.getInt(YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS, + YarnConfiguration.DEFAULT_RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS); + } + + // Need lock as we increment serialNo etc. 
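For reference, a minimal self-contained sketch of the idea behind the MasterKeyData holder introduced in this file: the serialized key record (an id plus the raw bytes) is what travels between daemons, while the decoded SecretKey is kept next to it so the bytes are not re-decoded on every password computation. All class names and the HmacSHA1 algorithm below are illustrative assumptions using plain JDK crypto types, not the YARN MasterKey record or Hadoop's SecretManager API.

import java.security.SecureRandom;
import javax.crypto.KeyGenerator;
import javax.crypto.SecretKey;
import javax.crypto.spec.SecretKeySpec;

public class MasterKeySketch {
  // Wire form of a master key: an id plus the raw key bytes.
  static final class KeyRecord {
    final int keyId;
    final byte[] bytes;
    KeyRecord(int keyId, byte[] bytes) { this.keyId = keyId; this.bytes = bytes; }
  }

  // Pairs the wire record with the decoded SecretKey so the bytes are not
  // re-decoded every time a password is generated or verified.
  static final class KeyData {
    final KeyRecord record;
    final SecretKey secret;
    KeyData(KeyRecord record) {
      this.record = record;
      this.secret = new SecretKeySpec(record.bytes, "HmacSHA1");
    }
  }

  private static int serialNo = new SecureRandom().nextInt();

  // Minting side: a fresh key gets the next serial number and new random bytes.
  static KeyData createNewMasterKey() throws Exception {
    SecretKey generated = KeyGenerator.getInstance("HmacSHA1").generateKey();
    return new KeyData(new KeyRecord(serialNo++, generated.getEncoded()));
  }

  public static void main(String[] args) throws Exception {
    KeyData minted = createNewMasterKey();
    // Receiving side: rebuild the same secret from the bytes carried in the record.
    KeyData copy = new KeyData(minted.record);
    System.out.println("key id " + minted.record.keyId
        + ", secrets equal: " + minted.secret.equals(copy.secret));
  }
}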
+ protected MasterKeyData createNewMasterKey() { + this.writeLock.lock(); + try { + return new MasterKeyData(); + } finally { + this.writeLock.unlock(); + } + } + + @Private + public MasterKey getCurrentKey() { + this.readLock.lock(); + try { + return this.currentMasterKey.getMasterKey(); + } finally { + this.readLock.unlock(); + } + } + + @Override + public byte[] createPassword(ContainerTokenIdentifier identifier) { + if (LOG.isDebugEnabled()) { + LOG.debug("Creating password for " + identifier.getContainerID() + + " to be run on NM " + identifier.getNmHostAddress()); + } + this.readLock.lock(); + try { + return createPassword(identifier.getBytes(), + this.currentMasterKey.getSecretKey()); + } finally { + this.readLock.unlock(); + } + } + + @Override + public byte[] retrievePassword(ContainerTokenIdentifier identifier) + throws SecretManager.InvalidToken { + this.readLock.lock(); + try { + return retrievePasswordInternal(identifier, this.currentMasterKey); + } finally { + this.readLock.unlock(); + } + } + + protected byte[] retrievePasswordInternal(ContainerTokenIdentifier identifier, + MasterKeyData masterKey) + throws org.apache.hadoop.security.token.SecretManager.InvalidToken { + if (LOG.isDebugEnabled()) { + LOG.debug("Retrieving password for " + identifier.getContainerID() + + " to be run on NM " + identifier.getNmHostAddress()); + } + return createPassword(identifier.getBytes(), masterKey.getSecretKey()); + } + + /** + * Used by the RPC layer. + */ + @Override + public ContainerTokenIdentifier createIdentifier() { + return new ContainerTokenIdentifier(); + } + + /** + * Helper function for creating ContainerTokens + * + * @param containerId + * @param nodeId + * @param capability + * @return the container-token + */ + public ContainerToken createContainerToken(ContainerId containerId, + NodeId nodeId, Resource capability) { + byte[] password; + ContainerTokenIdentifier tokenIdentifier; + long expiryTimeStamp = + System.currentTimeMillis() + containerTokenExpiryInterval; + + // Lock so that we use the same MasterKey's keyId and its bytes + this.readLock.lock(); + try { + tokenIdentifier = + new ContainerTokenIdentifier(containerId, nodeId.toString(), + capability, expiryTimeStamp, this.currentMasterKey.getMasterKey() + .getKeyId()); + password = this.createPassword(tokenIdentifier); + + } finally { + this.readLock.unlock(); + } + + return BuilderUtils.newContainerToken(nodeId, ByteBuffer.wrap(password), + tokenIdentifier); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/ContainerTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/ContainerTokenSecretManager.java deleted file mode 100644 index 3a041c4d7d..0000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/ContainerTokenSecretManager.java +++ /dev/null @@ -1,118 +0,0 @@ -/** -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. 
You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -package org.apache.hadoop.yarn.server.security; - -import java.nio.ByteBuffer; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -import javax.crypto.SecretKey; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.security.token.SecretManager; -import org.apache.hadoop.yarn.api.records.ContainerId; -import org.apache.hadoop.yarn.api.records.ContainerToken; -import org.apache.hadoop.yarn.api.records.NodeId; -import org.apache.hadoop.yarn.api.records.Resource; -import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; -import org.apache.hadoop.yarn.util.BuilderUtils; - -/** - * SecretManager for ContainerTokens. Used by both RM and NM and hence is - * present in yarn-server-common package. - * - */ -public class ContainerTokenSecretManager extends - SecretManager { - - private static Log LOG = LogFactory - .getLog(ContainerTokenSecretManager.class); - - Map secretkeys = - new ConcurrentHashMap(); - - private final long containerTokenExpiryInterval; - - public ContainerTokenSecretManager(Configuration conf) { - this.containerTokenExpiryInterval = - conf.getInt(YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS, - YarnConfiguration.DEFAULT_RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS); - } - - public ContainerToken createContainerToken(ContainerId containerId, - NodeId nodeId, Resource capability) { - try { - long expiryTimeStamp = - System.currentTimeMillis() + containerTokenExpiryInterval; - ContainerTokenIdentifier tokenIdentifier = - new ContainerTokenIdentifier(containerId, nodeId.toString(), - capability, expiryTimeStamp); - return BuilderUtils.newContainerToken(nodeId, - ByteBuffer.wrap(this.createPassword(tokenIdentifier)), tokenIdentifier); - } catch (IllegalArgumentException e) { - // this could be because DNS is down - in which case we just want - // to retry and not bring RM down. Caller should note and act on the fact - // that container is not creatable. - LOG.error("Error trying to create new container", e); - return null; - } - } - - // Used by master for generation of secretyKey per host - public SecretKey createAndGetSecretKey(CharSequence hostName) { - String hostNameStr = hostName.toString(); - if (!this.secretkeys.containsKey(hostNameStr)) { - LOG.debug("Creating secretKey for NM " + hostNameStr); - this.secretkeys.put(hostNameStr, - createSecretKey("mySecretKey".getBytes())); - } - return this.secretkeys.get(hostNameStr); - } - - // Used by slave for using secretKey sent by the master. 
- public void setSecretKey(CharSequence hostName, byte[] secretKeyBytes) { - this.secretkeys.put(hostName.toString(), createSecretKey(secretKeyBytes)); - } - - @Override - public byte[] createPassword(ContainerTokenIdentifier identifier) { - LOG.debug("Creating password for " + identifier.getContainerID() - + " to be run on NM " + identifier.getNmHostAddress() + " " - + this.secretkeys.get(identifier.getNmHostAddress())); - return createPassword(identifier.getBytes(), - this.secretkeys.get(identifier.getNmHostAddress())); - } - - @Override - public byte[] retrievePassword(ContainerTokenIdentifier identifier) - throws org.apache.hadoop.security.token.SecretManager.InvalidToken { - LOG.debug("Retrieving password for " + identifier.getContainerID() - + " to be run on NM " + identifier.getNmHostAddress()); - return createPassword(identifier.getBytes(), - this.secretkeys.get(identifier.getNmHostAddress())); - } - - @Override - public ContainerTokenIdentifier createIdentifier() { - return new ContainerTokenIdentifier(); - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto index 4f5543e5ca..71f5b1bc88 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto @@ -37,15 +37,21 @@ message NodeStatusProto { repeated ApplicationIdProto keep_alive_applications = 5; } +message MasterKeyProto { + optional int32 key_id = 1; + optional bytes bytes = 2; +} + message RegistrationResponseProto { - optional bytes secret_key = 1; + optional MasterKeyProto master_key = 1; optional NodeActionProto nodeAction = 2; } message HeartbeatResponseProto { optional int32 response_id = 1; - optional NodeActionProto nodeAction = 2; - repeated ContainerIdProto containers_to_cleanup = 3; - repeated ApplicationIdProto applications_to_cleanup = 4; + optional MasterKeyProto master_key = 2; + optional NodeActionProto nodeAction = 3; + repeated ContainerIdProto containers_to_cleanup = 4; + repeated ApplicationIdProto applications_to_cleanup = 5; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java index 3896713295..4fd206f859 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java @@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; +import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager; /** * Context interface for sharing information across components in the @@ -44,5 +45,7 @@ public interface Context { ConcurrentMap getContainers(); + NMContainerTokenSecretManager 
getContainerTokenSecretManager(); + NodeHealthStatus getNodeHealthStatus(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java index ceb3c0e688..0a68f41c84 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java @@ -46,9 +46,9 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; +import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.nodemanager.webapp.WebServer; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; import org.apache.hadoop.yarn.service.CompositeService; import org.apache.hadoop.yarn.service.Service; import org.apache.hadoop.yarn.service.ServiceStateChangeListener; @@ -64,7 +64,6 @@ public class NodeManager extends CompositeService implements private static final Log LOG = LogFactory.getLog(NodeManager.class); protected final NodeManagerMetrics metrics = NodeManagerMetrics.create(); - protected ContainerTokenSecretManager containerTokenSecretManager; private ApplicationACLsManager aclsManager; private NodeHealthCheckerService nodeHealthChecker; private LocalDirsHandlerService dirsHandler; @@ -75,10 +74,9 @@ public NodeManager() { } protected NodeStatusUpdater createNodeStatusUpdater(Context context, - Dispatcher dispatcher, NodeHealthCheckerService healthChecker, - ContainerTokenSecretManager containerTokenSecretManager) { + Dispatcher dispatcher, NodeHealthCheckerService healthChecker) { return new NodeStatusUpdaterImpl(context, dispatcher, healthChecker, - metrics, containerTokenSecretManager); + metrics); } protected NodeResourceMonitor createNodeResourceMonitor() { @@ -87,11 +85,10 @@ protected NodeResourceMonitor createNodeResourceMonitor() { protected ContainerManagerImpl createContainerManager(Context context, ContainerExecutor exec, DeletionService del, - NodeStatusUpdater nodeStatusUpdater, ContainerTokenSecretManager - containerTokenSecretManager, ApplicationACLsManager aclsManager, + NodeStatusUpdater nodeStatusUpdater, ApplicationACLsManager aclsManager, LocalDirsHandlerService dirsHandler) { return new ContainerManagerImpl(context, exec, del, nodeStatusUpdater, - metrics, containerTokenSecretManager, aclsManager, dirsHandler); + metrics, aclsManager, dirsHandler); } protected WebServer createWebServer(Context nmContext, @@ -110,15 +107,16 @@ public void init(Configuration conf) { conf.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true); - Context context = new NMContext(); - // Create the secretManager if need be. + NMContainerTokenSecretManager containerTokenSecretManager = null; if (UserGroupInformation.isSecurityEnabled()) { LOG.info("Security is enabled on NodeManager. 
" + "Creating ContainerTokenSecretManager"); - this.containerTokenSecretManager = new ContainerTokenSecretManager(conf); + containerTokenSecretManager = new NMContainerTokenSecretManager(conf); } + Context context = new NMContext(containerTokenSecretManager); + this.aclsManager = new ApplicationACLsManager(conf); ContainerExecutor exec = ReflectionUtils.newInstance( @@ -139,8 +137,8 @@ public void init(Configuration conf) { addService(nodeHealthChecker); dirsHandler = nodeHealthChecker.getDiskHandler(); - NodeStatusUpdater nodeStatusUpdater = createNodeStatusUpdater(context, - dispatcher, nodeHealthChecker, this.containerTokenSecretManager); + NodeStatusUpdater nodeStatusUpdater = + createNodeStatusUpdater(context, dispatcher, nodeHealthChecker); nodeStatusUpdater.register(this); NodeResourceMonitor nodeResourceMonitor = createNodeResourceMonitor(); @@ -148,7 +146,7 @@ public void init(Configuration conf) { ContainerManagerImpl containerManager = createContainerManager(context, exec, del, nodeStatusUpdater, - this.containerTokenSecretManager, this.aclsManager, dirsHandler); + this.aclsManager, dirsHandler); addService(containerManager); Service webServer = createWebServer(context, containerManager @@ -192,10 +190,13 @@ public static class NMContext implements Context { private final ConcurrentMap containers = new ConcurrentSkipListMap(); + private final NMContainerTokenSecretManager containerTokenSecretManager; + private final NodeHealthStatus nodeHealthStatus = RecordFactoryProvider .getRecordFactory(null).newRecordInstance(NodeHealthStatus.class); - public NMContext() { + public NMContext(NMContainerTokenSecretManager containerTokenSecretManager) { + this.containerTokenSecretManager = containerTokenSecretManager; this.nodeHealthStatus.setIsNodeHealthy(true); this.nodeHealthStatus.setHealthReport("Healthy"); this.nodeHealthStatus.setLastHealthReportTime(System.currentTimeMillis()); @@ -219,6 +220,10 @@ public ConcurrentMap getContainers() { return this.containers; } + @Override + public NMContainerTokenSecretManager getContainerTokenSecretManager() { + return this.containerTokenSecretManager; + } @Override public NodeHealthStatus getNodeHealthStatus() { return this.nodeHealthStatus; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdater.java index 2b10c9717a..f1e6ac3bf4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdater.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdater.java @@ -22,7 +22,5 @@ public interface NodeStatusUpdater extends Service { - byte[] getRMNMSharedSecret(); - void sendOutofBandHeartBeat(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java index 8209c1fb51..6954a69491 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java @@ -25,8 +25,8 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Random; import java.util.Map.Entry; +import java.util.Random; import org.apache.avro.AvroRuntimeException; import org.apache.commons.logging.Log; @@ -51,15 +51,14 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest; import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse; +import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.api.records.NodeAction; import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.server.api.records.RegistrationResponse; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; import org.apache.hadoop.yarn.service.AbstractService; - public class NodeStatusUpdaterImpl extends AbstractService implements NodeStatusUpdater { @@ -71,13 +70,11 @@ public class NodeStatusUpdaterImpl extends AbstractService implements private final Dispatcher dispatcher; private NodeId nodeId; - private ContainerTokenSecretManager containerTokenSecretManager; private long heartBeatInterval; private ResourceTracker resourceTracker; private InetSocketAddress rmAddress; private Resource totalResource; private int httpPort; - private byte[] secretKeyBytes = new byte[0]; private boolean isStopped; private RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); private boolean tokenKeepAliveEnabled; @@ -93,14 +90,12 @@ public class NodeStatusUpdaterImpl extends AbstractService implements private boolean hasToRebootNode; public NodeStatusUpdaterImpl(Context context, Dispatcher dispatcher, - NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics, - ContainerTokenSecretManager containerTokenSecretManager) { + NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics) { super(NodeStatusUpdaterImpl.class.getName()); this.healthChecker = healthChecker; this.context = context; this.dispatcher = dispatcher; this.metrics = metrics; - this.containerTokenSecretManager = containerTokenSecretManager; } @Override @@ -194,30 +189,24 @@ private void registerWithRM() throws YarnRemoteException { throw new YarnException( "Recieved SHUTDOWN signal from Resourcemanager ,Registration of NodeManager failed"); } - - if (UserGroupInformation.isSecurityEnabled()) { - this.secretKeyBytes = regResponse.getSecretKey().array(); - } - // do this now so that its set before we start heartbeating to RM if (UserGroupInformation.isSecurityEnabled()) { + MasterKey masterKey = regResponse.getMasterKey(); + // do this now so that its set before we start heartbeating to RM LOG.info("Security enabled - updating secret keys now"); // It is expected that status updater is started by this point and - // RM gives the shared secret in registration during StatusUpdater#start(). 
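A minimal, self-contained model of the key hand-off this hunk implements: the master key arrives in the registration response, and later heartbeat responses carry a key only when the ResourceManager has rolled it over, at which point the NodeManager swaps its current and previous keys. FakeRM, FakeNMSecretManager and MasterKeyRec are invented stand-ins for illustration only, not the YARN classes.

import java.util.concurrent.atomic.AtomicInteger;

public class KeyRolloverSketch {
  record MasterKeyRec(int keyId, byte[] bytes) {}

  // Stand-in for the RM: always sends a key at registration, and sends one
  // in a heartbeat response only after a roll-over.
  static class FakeRM {
    private final AtomicInteger ids = new AtomicInteger(1);
    private MasterKeyRec current = new MasterKeyRec(ids.getAndIncrement(), new byte[] {1});
    MasterKeyRec register() { return current; }
    MasterKeyRec heartbeat(boolean rolledSinceLastBeat) {
      if (rolledSinceLastBeat) {
        current = new MasterKeyRec(ids.getAndIncrement(), new byte[] {2});
        return current;
      }
      return null; // no key in an ordinary heartbeat response
    }
  }

  // Stand-in for the NM secret manager: keeps only current and previous keys.
  static class FakeNMSecretManager {
    MasterKeyRec current;
    MasterKeyRec previous;
    void setMasterKey(MasterKeyRec key) {
      if (current == null || current.keyId() != key.keyId()) {
        previous = current;
        current = key;
      }
    }
  }

  public static void main(String[] args) {
    FakeRM rm = new FakeRM();
    FakeNMSecretManager nm = new FakeNMSecretManager();

    nm.setMasterKey(rm.register());               // registration: key id 1
    MasterKeyRec fromBeat = rm.heartbeat(false);  // ordinary heartbeat: no key
    if (fromBeat != null) nm.setMasterKey(fromBeat);
    fromBeat = rm.heartbeat(true);                // roll-over: key id 2 arrives
    if (fromBeat != null) nm.setMasterKey(fromBeat);

    System.out.println("current=" + nm.current.keyId()
        + " previous=" + nm.previous.keyId());    // prints current=2 previous=1
  }
}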
- this.containerTokenSecretManager.setSecretKey( - this.nodeId.toString(), - this.getRMNMSharedSecret()); + // RM gives the shared secret in registration during + // StatusUpdater#start(). + if (masterKey != null) { + this.context.getContainerTokenSecretManager().setMasterKey(masterKey); + } } + LOG.info("Registered with ResourceManager as " + this.nodeId + " with total resource of " + this.totalResource); } - @Override - public byte[] getRMNMSharedSecret() { - return this.secretKeyBytes.clone(); - } - private List createKeepAliveApplicationList() { if (!tokenKeepAliveEnabled) { return Collections.emptyList(); @@ -335,6 +324,17 @@ public void run() { request.setNodeStatus(nodeStatus); HeartbeatResponse response = resourceTracker.nodeHeartbeat(request).getHeartbeatResponse(); + + // See if the master-key has rolled over + if (isSecurityEnabled()) { + MasterKey updatedMasterKey = response.getMasterKey(); + if (updatedMasterKey != null) { + // Will be non-null only on roll-over on RM side + context.getContainerTokenSecretManager().setMasterKey( + updatedMasterKey); + } + } + if (response.getNodeAction() == NodeAction.SHUTDOWN) { LOG .info("Recieved SHUTDOWN signal from Resourcemanager as part of heartbeat," + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index 8ec4c5e22b..f9650fbdc2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -21,10 +21,10 @@ import static org.apache.hadoop.yarn.service.Service.STATE.STARTED; import java.io.IOException; -import java.net.InetAddress; import java.net.InetSocketAddress; import java.nio.ByteBuffer; import java.util.Map; +import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -96,7 +96,6 @@ import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; import org.apache.hadoop.yarn.server.nodemanager.security.authorize.NMPolicyProvider; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; import org.apache.hadoop.yarn.service.CompositeService; import org.apache.hadoop.yarn.service.Service; import org.apache.hadoop.yarn.service.ServiceStateChangeListener; @@ -110,14 +109,12 @@ public class ContainerManagerImpl extends CompositeService implements final Context context; private final ContainersMonitor containersMonitor; private Server server; - private InetAddress resolvedAddress = null; private final ResourceLocalizationService rsrcLocalizationSrvc; private final ContainersLauncher containersLauncher; private final AuxServices auxiliaryServices; private final NodeManagerMetrics metrics; private final NodeStatusUpdater nodeStatusUpdater; - private ContainerTokenSecretManager containerTokenSecretManager; private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); @@ -129,8 +126,7 @@ public class ContainerManagerImpl 
extends CompositeService implements public ContainerManagerImpl(Context context, ContainerExecutor exec, DeletionService deletionContext, NodeStatusUpdater nodeStatusUpdater, - NodeManagerMetrics metrics, ContainerTokenSecretManager - containerTokenSecretManager, ApplicationACLsManager aclsManager, + NodeManagerMetrics metrics, ApplicationACLsManager aclsManager, LocalDirsHandlerService dirsHandler) { super(ContainerManagerImpl.class.getName()); this.context = context; @@ -149,7 +145,6 @@ public ContainerManagerImpl(Context context, ContainerExecutor exec, addService(containersLauncher); this.nodeStatusUpdater = nodeStatusUpdater; - this.containerTokenSecretManager = containerTokenSecretManager; this.aclsManager = aclsManager; // Start configurable services @@ -232,7 +227,7 @@ public void start() { server = rpc.getServer(ContainerManager.class, this, initialAddress, conf, - this.containerTokenSecretManager, + this.context.getContainerTokenSecretManager(), conf.getInt(YarnConfiguration.NM_CONTAINER_MGR_THREAD_COUNT, YarnConfiguration.DEFAULT_NM_CONTAINER_MGR_THREAD_COUNT)); @@ -267,24 +262,9 @@ public void stop() { super.stop(); } - /** - * Authorize the request. - * - * @param containerID - * of the container - * @param launchContext - * passed if verifying the startContainer, null otherwise. - * @throws YarnRemoteException - */ - private void authorizeRequest(ContainerId containerID, - ContainerLaunchContext launchContext) throws YarnRemoteException { - - if (!UserGroupInformation.isSecurityEnabled()) { - return; - } - - String containerIDStr = containerID.toString(); - + // Get the remoteUGI corresponding to the api call. + private UserGroupInformation getRemoteUgi(String containerIDStr) + throws YarnRemoteException { UserGroupInformation remoteUgi; try { remoteUgi = UserGroupInformation.getCurrentUser(); @@ -295,28 +275,65 @@ private void authorizeRequest(ContainerId containerID, LOG.warn(msg); throw RPCUtil.getRemoteException(msg); } + return remoteUgi; + } + + // Obtain the needed ContainerTokenIdentifier from the remote-UGI. RPC layer + // currently sets only the required id, but iterate through anyways just to + // be sure. + private ContainerTokenIdentifier selectContainerTokenIdentifier( + UserGroupInformation remoteUgi) { + Set tokenIdentifiers = remoteUgi.getTokenIdentifiers(); + ContainerTokenIdentifier resultId = null; + for (TokenIdentifier id : tokenIdentifiers) { + if (id instanceof ContainerTokenIdentifier) { + resultId = (ContainerTokenIdentifier) id; + break; + } + } + return resultId; + } + + /** + * Authorize the request. + * + * @param containerIDStr + * of the container + * @param launchContext + * passed if verifying the startContainer, null otherwise. + * @param remoteUgi + * ugi corresponding to the remote end making the api-call + * @throws YarnRemoteException + */ + private void authorizeRequest(String containerIDStr, + ContainerLaunchContext launchContext, UserGroupInformation remoteUgi) + throws YarnRemoteException { + + if (!UserGroupInformation.isSecurityEnabled()) { + return; + } boolean unauthorized = false; - StringBuilder messageBuilder = new StringBuilder( - "Unauthorized request to start container. "); + StringBuilder messageBuilder = + new StringBuilder("Unauthorized request to start container. 
"); if (!remoteUgi.getUserName().equals(containerIDStr)) { unauthorized = true; messageBuilder.append("\nExpected containerId: " + remoteUgi.getUserName() + " Found: " + containerIDStr); - } - - if (launchContext != null) { - - // Verify other things for startContainer() request. + } else if (launchContext != null) { + // Verify other things also for startContainer() request. if (LOG.isDebugEnabled()) { - LOG.debug("Number of TokenIdentifiers in the UGI from RPC: " - + remoteUgi.getTokenIdentifiers().size()); + LOG.debug("Number of TokenIdentifiers in the UGI from RPC: " + + remoteUgi.getTokenIdentifiers().size()); } - // We must and should get only one TokenIdentifier from the RPC. - ContainerTokenIdentifier tokenId = (ContainerTokenIdentifier) remoteUgi - .getTokenIdentifiers().iterator().next(); + + + // Get the tokenId from the remote user ugi + ContainerTokenIdentifier tokenId = + selectContainerTokenIdentifier(remoteUgi); + if (tokenId == null) { unauthorized = true; messageBuilder @@ -324,6 +341,15 @@ private void authorizeRequest(ContainerId containerID, + containerIDStr); } else { + // Is the container being relaunched? Or RPC layer let startCall with + // tokens generated off old-secret through + if (!this.context.getContainerTokenSecretManager() + .isValidStartContainerRequest(tokenId)) { + unauthorized = true; + messageBuilder.append("\n Attempt to relaunch the same " + + "container with id " + containerIDStr + "."); + } + // Ensure the token is not expired. // Token expiry is not checked for stopContainer/getContainerStatus if (tokenId.getExpiryTimeStamp() < System.currentTimeMillis()) { @@ -348,7 +374,7 @@ private void authorizeRequest(ContainerId containerID, throw RPCUtil.getRemoteException(msg); } } - + /** * Start a container on this NodeManager. 
*/ @@ -359,10 +385,13 @@ public StartContainerResponse startContainer(StartContainerRequest request) ContainerLaunchContext launchContext = request.getContainerLaunchContext(); ContainerId containerID = launchContext.getContainerId(); - authorizeRequest(containerID, launchContext); + String containerIDStr = containerID.toString(); - LOG.info("Start request for " + launchContext.getContainerId() - + " by user " + launchContext.getUser()); + UserGroupInformation remoteUgi = getRemoteUgi(containerIDStr); + authorizeRequest(containerIDStr, launchContext, remoteUgi); + + LOG.info("Start request for " + containerIDStr + " by user " + + launchContext.getUser()); // //////////// Parse credentials ByteBuffer tokens = launchContext.getContainerTokens(); @@ -394,14 +423,14 @@ public StartContainerResponse startContainer(StartContainerRequest request) AuditConstants.START_CONTAINER, "ContainerManagerImpl", "Container already running on this node!", applicationID, containerID); - throw RPCUtil.getRemoteException("Container " + containerID + throw RPCUtil.getRemoteException("Container " + containerIDStr + " already is running on this node!!"); } // Create the application Application application = new ApplicationImpl(dispatcher, this.aclsManager, - launchContext.getUser(), applicationID, credentials, context); + launchContext.getUser(), applicationID, credentials, context); if (null == context.getApplications().putIfAbsent(applicationID, application)) { LOG.info("Creating a new application reference for app " @@ -414,6 +443,12 @@ public StartContainerResponse startContainer(StartContainerRequest request) // TODO: Validate the request dispatcher.getEventHandler().handle( new ApplicationContainerInitEvent(container)); + if (UserGroupInformation.isSecurityEnabled()) { + ContainerTokenIdentifier tokenId = + selectContainerTokenIdentifier(remoteUgi); + this.context.getContainerTokenSecretManager().startContainerSuccessful( + tokenId); + } NMAuditLogger.logSuccess(launchContext.getUser(), AuditConstants.START_CONTAINER, "ContainerManageImpl", @@ -438,8 +473,12 @@ public StopContainerResponse stopContainer(StopContainerRequest request) throws YarnRemoteException { ContainerId containerID = request.getContainerId(); + String containerIDStr = containerID.toString(); + // TODO: Only the container's owner can kill containers today. - authorizeRequest(containerID, null); + + UserGroupInformation remoteUgi = getRemoteUgi(containerIDStr); + authorizeRequest(containerIDStr, null, remoteUgi); StopContainerResponse response = recordFactory.newRecordInstance(StopContainerResponse.class); @@ -476,10 +515,14 @@ public GetContainerStatusResponse getContainerStatus( GetContainerStatusRequest request) throws YarnRemoteException { ContainerId containerID = request.getContainerId(); - // TODO: Only the container's owner can get containers' status today. - authorizeRequest(containerID, null); + String containerIDStr = containerID.toString(); - LOG.info("Getting container-status for " + containerID); + // TODO: Only the container's owner can get containers' status today. 
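A compact sketch of the start-container authorization checks introduced above: a container token must be present, the remote user name must equal the container id, the token must not have already been used for a successful start (relaunch), and it must not be expired. TokenId and authorizeStart are simplified stand-ins; the real code works against UserGroupInformation and ContainerTokenIdentifier and sequences these checks slightly differently.

import java.util.Set;

public class StartAuthSketch {
  // Simplified stand-in for ContainerTokenIdentifier.
  record TokenId(String containerId, long expiryMillis) {}

  // Returns null when the start request is authorized, otherwise the reason
  // it must be rejected.
  static String authorizeStart(String remoteUser, TokenId token,
                               Set<String> alreadyStarted, long now) {
    if (token == null) {
      return "no ContainerTokenIdentifier found in the remote UGI";
    }
    if (!remoteUser.equals(token.containerId())) {
      return "expected containerId " + remoteUser + ", found " + token.containerId();
    }
    if (alreadyStarted.contains(token.containerId())) {
      return "attempt to relaunch the same container " + token.containerId();
    }
    if (token.expiryMillis() < now) {
      return "token for " + token.containerId() + " has expired";
    }
    return null;
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    TokenId fresh = new TokenId("container_1_0001_01_000001", now + 600_000L);
    System.out.println(authorizeStart("container_1_0001_01_000001", fresh,
        Set.of(), now));                                  // null: authorized
    System.out.println(authorizeStart("container_1_0001_01_000001", fresh,
        Set.of("container_1_0001_01_000001"), now));      // relaunch rejected
  }
}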
+ + UserGroupInformation remoteUgi = getRemoteUgi(containerIDStr); + authorizeRequest(containerIDStr, null, remoteUgi); + + LOG.info("Getting container-status for " + containerIDStr); Container container = this.context.getContainers().get(containerID); if (container != null) { ContainerStatus containerStatus = container.cloneAndGetContainerStatus(); @@ -490,7 +533,7 @@ public GetContainerStatusResponse getContainerStatus( return response; } - throw RPCUtil.getRemoteException("Container " + containerID + throw RPCUtil.getRemoteException("Container " + containerIDStr + " is not handled by this NodeManager"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java index 2c61b70a07..491cbe48de 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java @@ -28,8 +28,9 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.security.Credentials; -import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; +import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.logaggregation.ContainerLogsRetentionPolicy; @@ -42,6 +43,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ApplicationLocalizationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEventType; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.LogAggregationService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; @@ -365,6 +367,10 @@ static class AppCompletelyDoneTransition implements @Override public void transition(ApplicationImpl app, ApplicationEvent event) { + // Inform the ContainerTokenSecretManager + if (UserGroupInformation.isSecurityEnabled()) { + app.context.getContainerTokenSecretManager().appFinished(app.appId); + } // Inform the logService app.dispatcher.getEventHandler().handle( new LogHandlerAppFinishedEvent(app.appId)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java new file 
mode 100644 index 0000000000..29253eaf7a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java @@ -0,0 +1,189 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.server.nodemanager.security; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.SecretManager; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; +import org.apache.hadoop.yarn.server.api.records.MasterKey; +import org.apache.hadoop.yarn.server.security.BaseContainerTokenSecretManager; + +/** + * The NM maintains only two master-keys. The current key that RM knows and the + * key from the previous rolling-interval. + * + */ +public class NMContainerTokenSecretManager extends + BaseContainerTokenSecretManager { + + private static final Log LOG = LogFactory + .getLog(NMContainerTokenSecretManager.class); + + private MasterKeyData previousMasterKey; + + private final Map> oldMasterKeys; + + public NMContainerTokenSecretManager(Configuration conf) { + super(conf); + this.oldMasterKeys = + new HashMap>(); + } + + /** + * Used by NodeManagers to create a token-secret-manager with the key obtained + * from the RM. This can happen during registration or when the RM rolls the + * master-key and signals the NM. + * + * @param masterKeyRecord + */ + @Private + public synchronized void setMasterKey(MasterKey masterKeyRecord) { + LOG.info("Rolling master-key for container-tokens, got key with id " + + masterKeyRecord.getKeyId()); + if (super.currentMasterKey == null) { + super.currentMasterKey = new MasterKeyData(masterKeyRecord); + } else { + if (super.currentMasterKey.getMasterKey().getKeyId() != masterKeyRecord + .getKeyId()) { + // Update keys only if the key has changed. + this.previousMasterKey = super.currentMasterKey; + super.currentMasterKey = new MasterKeyData(masterKeyRecord); + } + } + } + + /** + * Override of this is to validate ContainerTokens generated by using + * different {@link MasterKey}s. 
+ */ + @Override + public synchronized byte[] retrievePassword( + ContainerTokenIdentifier identifier) throws SecretManager.InvalidToken { + int keyId = identifier.getMasterKeyId(); + ContainerId containerId = identifier.getContainerID(); + ApplicationId appId = + containerId.getApplicationAttemptId().getApplicationId(); + + MasterKeyData masterKeyToUse = null; + + if (this.previousMasterKey != null + && keyId == this.previousMasterKey.getMasterKey().getKeyId()) { + // A container-launch has come in with a token generated off the last + // master-key + masterKeyToUse = this.previousMasterKey; + } else if (keyId == super.currentMasterKey.getMasterKey().getKeyId()) { + // A container-launch has come in with a token generated off the current + // master-key + masterKeyToUse = super.currentMasterKey; + } else if (this.oldMasterKeys.containsKey(appId) + && this.oldMasterKeys.get(appId).containsKey(containerId)) { + // This means on the following happened: + // (1) a stopContainer() or a getStatus() happened for a container with + // token generated off a master-key that is neither current nor the + // previous one. + // (2) a container-relaunch has come in with a token generated off a + // master-key that is neither current nor the previous one. + // This basically lets stop and getStatus() calls with old-tokens to pass + // through without any issue, i.e. (1). + // Start-calls for repetitive launches (2) also pass through RPC here, but + // get thwarted at the app-layer as part of startContainer() call. + masterKeyToUse = this.oldMasterKeys.get(appId).get(containerId); + } + + if (masterKeyToUse != null) { + return retrievePasswordInternal(identifier, masterKeyToUse); + } + + // Invalid request. Like startContainer() with token generated off + // old-master-keys. + throw new SecretManager.InvalidToken("Given Container " + + identifier.getContainerID().toString() + + " seems to have an illegally generated token."); + } + + /** + * Container start has gone through. Store the corresponding keys so that + * stopContainer() and getContainerStatus() can be authenticated long after + * the container-start went through. + */ + public synchronized void startContainerSuccessful( + ContainerTokenIdentifier tokenId) { + if (!UserGroupInformation.isSecurityEnabled()) { + return; + } + + int keyId = tokenId.getMasterKeyId(); + if (currentMasterKey.getMasterKey().getKeyId() == keyId) { + addKeyForContainerId(tokenId.getContainerID(), currentMasterKey); + } else if (previousMasterKey != null + && previousMasterKey.getMasterKey().getKeyId() == keyId) { + addKeyForContainerId(tokenId.getContainerID(), previousMasterKey); + } + } + + /** + * Ensure the startContainer call is not using an older cached key. Will + * return false once startContainerSuccessful is called. Does not check + * the actual key being current since that is verified by the security layer + * via retrievePassword. 
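A simplified model of the key-selection order the new NMContainerTokenSecretManager applies when validating a token: the previous key, then the current key, then the per-container keys retained after a successful start, so that stopContainer and getContainerStatus keep working long after the master key has rolled. All names below are illustrative assumptions, not the actual YARN implementation.

import java.util.HashMap;
import java.util.Map;

public class KeySelectionSketch {
  record Key(int id) {}
  record Token(String containerId, String appId, int keyId) {}

  static Key current = new Key(7);
  static Key previous = new Key(6);
  // appId -> (containerId -> key that signed the container's start token)
  static Map<String, Map<String, Key>> oldKeys = new HashMap<>();

  static Key selectKey(Token t) {
    if (previous != null && t.keyId() == previous.id()) return previous;
    if (t.keyId() == current.id()) return current;
    Map<String, Key> perApp = oldKeys.get(t.appId());
    if (perApp != null && perApp.containsKey(t.containerId())) {
      // stop/getStatus for a long-running container whose key has since rolled
      return perApp.get(t.containerId());
    }
    throw new IllegalArgumentException("token for " + t.containerId()
        + " was signed with an unknown key " + t.keyId());
  }

  // Called after a successful start so later stop/getStatus calls still resolve.
  static void startSucceeded(Token t, Key used) {
    oldKeys.computeIfAbsent(t.appId(), a -> new HashMap<>())
           .put(t.containerId(), used);
  }

  public static void main(String[] args) {
    Token start = new Token("container_1_0001_01_000001", "app_1_0001", 6);
    Key used = selectKey(start);   // matches the previous key
    startSucceeded(start, used);
    previous = current;            // the key rolls again: id 6 is now neither
    current = new Key(8);          // current nor previous
    selectKey(start);              // still resolves via the per-container map
    System.out.println("old token still resolvable after roll-overs");
  }
}

Keeping only the current and previous keys bounds NodeManager state while still accepting tokens minted just before a roll-over; the per-container map covers the remaining case of containers that outlive several roll-overs, and is dropped when the application finishes.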
+ */ + public synchronized boolean isValidStartContainerRequest( + ContainerTokenIdentifier tokenId) { + ContainerId containerID = tokenId.getContainerID(); + ApplicationId applicationId = + containerID.getApplicationAttemptId().getApplicationId(); + return !this.oldMasterKeys.containsKey(applicationId) + || !this.oldMasterKeys.get(applicationId).containsKey(containerID); + } + + private synchronized void addKeyForContainerId(ContainerId containerId, + MasterKeyData masterKeyData) { + if (containerId != null) { + ApplicationId appId = + containerId.getApplicationAttemptId().getApplicationId(); + if (!this.oldMasterKeys.containsKey(appId)) { + this.oldMasterKeys.put(appId, + new ConcurrentHashMap()); + } + ConcurrentMap containerIdToKeysMapForThisApp = + this.oldMasterKeys.get(appId); + containerIdToKeysMapForThisApp.put(containerId, masterKeyData); + } else { + LOG.warn("Not adding key for null containerId"); + } + } + + // Holding on to master-keys corresponding to containers until the app is + // finished due to the multiple ways a container can finish. Avoid + // stopContainer calls seeing unnecessary authorization exceptions. + public synchronized void appFinished(ApplicationId appId) { + this.oldMasterKeys.remove(appId); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java index bf429da734..d848405708 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java @@ -27,8 +27,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.api.records.ContainerId; -import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent; @@ -50,6 +48,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.LogHandler; import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerEvent; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; +import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; public class DummyContainerManager extends ContainerManagerImpl { @@ -59,11 +58,10 @@ public class DummyContainerManager extends ContainerManagerImpl { public DummyContainerManager(Context context, ContainerExecutor exec, DeletionService deletionContext, NodeStatusUpdater nodeStatusUpdater, NodeManagerMetrics metrics, - ContainerTokenSecretManager containerTokenSecretManager, ApplicationACLsManager applicationACLsManager, LocalDirsHandlerService dirsHandler) { super(context, exec, deletionContext, nodeStatusUpdater, metrics, - containerTokenSecretManager, applicationACLsManager, dirsHandler); + 
applicationACLsManager, dirsHandler); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java index dcdc1b91ce..b1283b5b6c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java @@ -36,12 +36,12 @@ import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; -import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; import org.apache.hadoop.yarn.server.api.ResourceTracker; import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext; import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; +import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager; +import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.junit.Test; public class TestEventFlow { @@ -69,9 +69,9 @@ public void testSuccessfulContainerLaunch() throws InterruptedException, localLogDir.mkdir(); remoteLogDir.mkdir(); - Context context = new NMContext(); - YarnConfiguration conf = new YarnConfiguration(); + Context context = new NMContext(new NMContainerTokenSecretManager(conf)); + conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath()); conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath()); conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, @@ -86,10 +86,8 @@ public void testSuccessfulContainerLaunch() throws InterruptedException, healthChecker.init(conf); LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler(); NodeManagerMetrics metrics = NodeManagerMetrics.create(); - ContainerTokenSecretManager containerTokenSecretManager = - new ContainerTokenSecretManager(conf); NodeStatusUpdater nodeStatusUpdater = - new NodeStatusUpdaterImpl(context, dispatcher, healthChecker, metrics, containerTokenSecretManager) { + new NodeStatusUpdaterImpl(context, dispatcher, healthChecker, metrics) { @Override protected ResourceTracker getRMClient() { return new LocalRMInterface(); @@ -101,10 +99,9 @@ protected void startStatusUpdater() { } }; - DummyContainerManager containerManager = new DummyContainerManager( - context, exec, del, nodeStatusUpdater, metrics, - containerTokenSecretManager, new ApplicationACLsManager(conf), - dirsHandler); + DummyContainerManager containerManager = + new DummyContainerManager(context, exec, del, nodeStatusUpdater, + metrics, new ApplicationACLsManager(conf), dirsHandler); containerManager.init(conf); containerManager.start(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java index 8a6dabf1b2..d33d73a8a9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java @@ -17,37 +17,29 @@ */ package org.apache.hadoop.yarn.server.nodemanager; +import static junit.framework.Assert.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + import java.net.InetAddress; import java.net.InetSocketAddress; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.TestRPC.TestImpl; import org.apache.hadoop.ipc.TestRPC.TestProtocol; +import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; -import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger; -import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger.AuditConstants; import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger.Keys; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; - -import org.apache.hadoop.net.NetUtils; - -import static org.mockito.Mockito.*; -import static junit.framework.Assert.*; -import org.junit.After; import org.junit.Before; import org.junit.Test; - /** * Tests {@link NMAuditLogger}. 
*/ public class TestNMAuditLogger { - private static final Log LOG = LogFactory.getLog(TestNMAuditLogger.class); private static final String USER = "test"; private static final String OPERATION = "oper"; private static final String TARGET = "tgt"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index 30c54e2dd6..a2f569b382 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -65,7 +65,6 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; import org.apache.hadoop.yarn.service.Service; import org.apache.hadoop.yarn.service.Service.STATE; import org.apache.hadoop.yarn.util.BuilderUtils; @@ -234,10 +233,8 @@ private class MyNodeStatusUpdater extends NodeStatusUpdaterImpl { private Context context; public MyNodeStatusUpdater(Context context, Dispatcher dispatcher, - NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics, - ContainerTokenSecretManager containerTokenSecretManager) { - super(context, dispatcher, healthChecker, metrics, - containerTokenSecretManager); + NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics) { + super(context, dispatcher, healthChecker, metrics); this.context = context; } @@ -252,10 +249,8 @@ private class MyNodeStatusUpdater3 extends NodeStatusUpdaterImpl { private Context context; public MyNodeStatusUpdater3(Context context, Dispatcher dispatcher, - NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics, - ContainerTokenSecretManager containerTokenSecretManager) { - super(context, dispatcher, healthChecker, metrics, - containerTokenSecretManager); + NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics) { + super(context, dispatcher, healthChecker, metrics); this.context = context; this.resourceTracker = new MyResourceTracker3(this.context); } @@ -276,11 +271,9 @@ private class MyNodeManager extends NodeManager { private MyNodeStatusUpdater3 nodeStatusUpdater; @Override protected NodeStatusUpdater createNodeStatusUpdater(Context context, - Dispatcher dispatcher, NodeHealthCheckerService healthChecker, - ContainerTokenSecretManager containerTokenSecretManager) { + Dispatcher dispatcher, NodeHealthCheckerService healthChecker) { this.nodeStatusUpdater = - new MyNodeStatusUpdater3(context, dispatcher, healthChecker, metrics, - containerTokenSecretManager); + new MyNodeStatusUpdater3(context, dispatcher, healthChecker, metrics); return this.nodeStatusUpdater; } @@ -398,10 +391,9 @@ public void testNMRegistration() throws InterruptedException { nm = new NodeManager() { @Override protected NodeStatusUpdater createNodeStatusUpdater(Context context, - Dispatcher dispatcher, NodeHealthCheckerService healthChecker, - ContainerTokenSecretManager containerTokenSecretManager) { + 
Dispatcher dispatcher, NodeHealthCheckerService healthChecker) { return new MyNodeStatusUpdater(context, dispatcher, healthChecker, - metrics, containerTokenSecretManager); + metrics); } }; @@ -528,11 +520,9 @@ public void testNMShutdownForRegistrationFailure() { nm = new NodeManager() { @Override protected NodeStatusUpdater createNodeStatusUpdater(Context context, - Dispatcher dispatcher, NodeHealthCheckerService healthChecker, - ContainerTokenSecretManager containerTokenSecretManager) { + Dispatcher dispatcher, NodeHealthCheckerService healthChecker) { MyNodeStatusUpdater nodeStatusUpdater = new MyNodeStatusUpdater( - context, dispatcher, healthChecker, metrics, - containerTokenSecretManager); + context, dispatcher, healthChecker, metrics); MyResourceTracker2 myResourceTracker2 = new MyResourceTracker2(); myResourceTracker2.registerNodeAction = NodeAction.SHUTDOWN; nodeStatusUpdater.resourceTracker = myResourceTracker2; @@ -556,22 +546,19 @@ public void testNoRegistrationWhenNMServicesFail() { nm = new NodeManager() { @Override protected NodeStatusUpdater createNodeStatusUpdater(Context context, - Dispatcher dispatcher, NodeHealthCheckerService healthChecker, - ContainerTokenSecretManager containerTokenSecretManager) { + Dispatcher dispatcher, NodeHealthCheckerService healthChecker) { return new MyNodeStatusUpdater(context, dispatcher, healthChecker, - metrics, containerTokenSecretManager); + metrics); } @Override protected ContainerManagerImpl createContainerManager(Context context, ContainerExecutor exec, DeletionService del, NodeStatusUpdater nodeStatusUpdater, - ContainerTokenSecretManager containerTokenSecretManager, ApplicationACLsManager aclsManager, LocalDirsHandlerService diskhandler) { - return new ContainerManagerImpl(context, exec, del, - nodeStatusUpdater, metrics, containerTokenSecretManager, - aclsManager, diskhandler) { + return new ContainerManagerImpl(context, exec, del, nodeStatusUpdater, + metrics, aclsManager, diskhandler) { @Override public void start() { // Simulating failure of starting RPC server @@ -654,11 +641,9 @@ private NodeManager getNodeManager(final NodeAction nodeHeartBeatAction) { return new NodeManager() { @Override protected NodeStatusUpdater createNodeStatusUpdater(Context context, - Dispatcher dispatcher, NodeHealthCheckerService healthChecker, - ContainerTokenSecretManager containerTokenSecretManager) { + Dispatcher dispatcher, NodeHealthCheckerService healthChecker) { MyNodeStatusUpdater myNodeStatusUpdater = new MyNodeStatusUpdater( - context, dispatcher, healthChecker, metrics, - containerTokenSecretManager); + context, dispatcher, healthChecker, metrics); MyResourceTracker2 myResourceTracker2 = new MyResourceTracker2(); myResourceTracker2.heartBeatNodeAction = nodeHeartBeatAction; myNodeStatusUpdater.resourceTracker = myResourceTracker2; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java index 4e42926bbd..8300d8fee3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java @@ -54,8 +54,8 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; +import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; import org.apache.hadoop.yarn.service.Service.STATE; import org.junit.After; import org.junit.Before; @@ -70,8 +70,6 @@ public abstract class BaseContainerManagerTest { protected static File localLogDir; protected static File remoteLogDir; protected static File tmpDir; - protected ContainerTokenSecretManager containerTokenSecretManager = - new ContainerTokenSecretManager(new Configuration()); protected final NodeManagerMetrics metrics = NodeManagerMetrics.create(); @@ -93,7 +91,8 @@ public BaseContainerManagerTest() throws UnsupportedFileSystemException { .getLog(BaseContainerManagerTest.class); protected Configuration conf = new YarnConfiguration(); - protected Context context = new NMContext(); + protected Context context = new NMContext(new NMContainerTokenSecretManager( + conf)); protected ContainerExecutor exec; protected DeletionService delSrvc; protected String user = "nobody"; @@ -101,7 +100,7 @@ public BaseContainerManagerTest() throws UnsupportedFileSystemException { protected LocalDirsHandlerService dirsHandler; protected NodeStatusUpdater nodeStatusUpdater = new NodeStatusUpdaterImpl( - context, new AsyncDispatcher(), null, metrics, this.containerTokenSecretManager) { + context, new AsyncDispatcher(), null, metrics) { @Override protected ResourceTracker getRMClient() { return new LocalRMInterface(); @@ -155,9 +154,9 @@ public void delete(String user, Path subDir, Path[] baseDirs) { nodeHealthChecker = new NodeHealthCheckerService(); nodeHealthChecker.init(conf); dirsHandler = nodeHealthChecker.getDiskHandler(); - containerManager = new ContainerManagerImpl(context, exec, delSrvc, - nodeStatusUpdater, metrics, this.containerTokenSecretManager, - new ApplicationACLsManager(conf), dirsHandler); + containerManager = + new ContainerManagerImpl(context, exec, delSrvc, nodeStatusUpdater, + metrics, new ApplicationACLsManager(conf), dirsHandler); containerManager.init(conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java index 3ec5fa2f86..5b01cc0814 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java @@ -51,14 +51,13 @@ import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; import 
org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent; -import org.apache.hadoop.yarn.server.nodemanager.DeletionService; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal; +import org.apache.hadoop.yarn.server.nodemanager.DeletionService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; import org.apache.hadoop.yarn.util.ConverterUtils; import org.junit.Test; @@ -384,11 +383,9 @@ public void testLocalFilesCleanup() throws InterruptedException, delSrvc = new DeletionService(exec); delSrvc.init(conf); - ContainerTokenSecretManager containerTokenSecretManager = new - ContainerTokenSecretManager(conf); - containerManager = new ContainerManagerImpl(context, exec, delSrvc, - nodeStatusUpdater, metrics, containerTokenSecretManager, - new ApplicationACLsManager(conf), dirsHandler); + containerManager = + new ContainerManagerImpl(context, exec, delSrvc, nodeStatusUpdater, + metrics, new ApplicationACLsManager(conf), dirsHandler); containerManager.init(conf); containerManager.start(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java index 4b3736024b..3d742cb696 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java @@ -30,9 +30,9 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.event.DrainDispatcher; @@ -50,8 +50,8 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ApplicationLocalizationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEventType; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerEvent; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerEventType; import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorEventType; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java index 5808b865db..76ace14ed7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java @@ -76,7 +76,7 @@ public void tearDown() { } private String startNMWebAppServer(String webAddr) { - Context nmContext = new NodeManager.NMContext(); + Context nmContext = new NodeManager.NMContext(null); ResourceView resourceView = new ResourceView() { @Override public long getVmemAllocatedForContainers() { @@ -116,7 +116,7 @@ public void testNMWebAppWithEphemeralPort() throws IOException { @Test public void testNMWebApp() throws IOException { - Context nmContext = new NodeManager.NMContext(); + Context nmContext = new NodeManager.NMContext(null); ResourceView resourceView = new ResourceView() { @Override public long getVmemAllocatedForContainers() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java index a12fb84bef..1a48db1620 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java @@ -85,7 +85,7 @@ public class TestNMWebServices extends JerseyTest { private Injector injector = Guice.createInjector(new ServletModule() { @Override protected void configureServlets() { - nmContext = new NodeManager.NMContext(); + nmContext = new NodeManager.NMContext(null); nmContext.getNodeId().setHost("testhost.foo.com"); nmContext.getNodeId().setPort(8042); resourceView = new ResourceView() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java index 6b6278ccba..e5d90b2c8c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java @@ -91,7 +91,7 @@ 
public class TestNMWebServicesApps extends JerseyTest { private Injector injector = Guice.createInjector(new ServletModule() { @Override protected void configureServlets() { - nmContext = new NodeManager.NMContext(); + nmContext = new NodeManager.NMContext(null); nmContext.getNodeId().setHost("testhost.foo.com"); nmContext.getNodeId().setPort(9999); resourceView = new ResourceView() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesContainers.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesContainers.java index ef2e813262..8abf160bd0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesContainers.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesContainers.java @@ -91,7 +91,7 @@ public class TestNMWebServicesContainers extends JerseyTest { private Injector injector = Guice.createInjector(new ServletModule() { @Override protected void configureServlets() { - nmContext = new NodeManager.NMContext(); + nmContext = new NodeManager.NMContext(null); nmContext.getNodeId().setHost("testhost.foo.com"); nmContext.getNodeId().setPort(8042); resourceView = new ResourceView() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java index b0a1a96166..f100b3aae8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java @@ -21,11 +21,11 @@ import java.io.IOException; import java.net.InetSocketAddress; -import org.apache.hadoop.ipc.Server; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.ipc.Server; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.Groups; import org.apache.hadoop.security.UserGroupInformation; @@ -171,8 +171,7 @@ public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request) throws YarnRemoteException { UserGroupInformation user = checkAcls("refreshQueues"); try { - scheduler.reinitialize(conf, null, null); // ContainerTokenSecretManager can't - // be 'refreshed' + scheduler.reinitialize(conf, this.rmContext); RMAuditLogger.logSuccess(user.getShortUserName(), "refreshQueues", "AdminService"); return recordFactory.newRecordInstance(RefreshQueuesResponse.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java index ccd1930b29..3a113fc0cb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java @@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.security.ApplicationTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; /** * Context of the ResourceManager. @@ -58,4 +59,6 @@ public interface RMContext { DelegationTokenRenewer getDelegationTokenRenewer(); ApplicationTokenSecretManager getApplicationTokenSecretManager(); + + RMContainerTokenSecretManager getContainerTokenSecretManager(); } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java index e9055536f8..525033c5e1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java @@ -33,6 +33,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.security.ApplicationTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; public class RMContextImpl implements RMContext { @@ -53,13 +54,15 @@ public class RMContextImpl implements RMContext { private ContainerAllocationExpirer containerAllocationExpirer; private final DelegationTokenRenewer tokenRenewer; private final ApplicationTokenSecretManager appTokenSecretManager; + private final RMContainerTokenSecretManager containerTokenSecretManager; public RMContextImpl(Store store, Dispatcher rmDispatcher, ContainerAllocationExpirer containerAllocationExpirer, AMLivelinessMonitor amLivelinessMonitor, AMLivelinessMonitor amFinishingMonitor, DelegationTokenRenewer tokenRenewer, - ApplicationTokenSecretManager appTokenSecretManager) { + ApplicationTokenSecretManager appTokenSecretManager, + RMContainerTokenSecretManager containerTokenSecretManager) { this.store = store; this.rmDispatcher = rmDispatcher; this.containerAllocationExpirer = containerAllocationExpirer; @@ -67,6 +70,7 @@ public RMContextImpl(Store store, Dispatcher rmDispatcher, this.amFinishingMonitor = amFinishingMonitor; this.tokenRenewer = tokenRenewer; this.appTokenSecretManager = appTokenSecretManager; + this.containerTokenSecretManager = containerTokenSecretManager; } @Override @@ -123,4 +127,9 @@ public DelegationTokenRenewer getDelegationTokenRenewer() { public 
ApplicationTokenSecretManager getApplicationTokenSecretManager() { return this.appTokenSecretManager; } + + @Override + public RMContainerTokenSecretManager getContainerTokenSecretManager() { + return this.containerTokenSecretManager; + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index f0e84cbd4f..e7dfa4d1c4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -48,8 +48,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher; import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable; import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory; import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store.RMState; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType; @@ -67,9 +67,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; import org.apache.hadoop.yarn.server.resourcemanager.security.ApplicationTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebApp; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; import org.apache.hadoop.yarn.server.webproxy.AppReportFetcher; import org.apache.hadoop.yarn.server.webproxy.ProxyUriUtils; import org.apache.hadoop.yarn.server.webproxy.WebAppProxy; @@ -100,7 +100,7 @@ public class ResourceManager extends CompositeService implements Recoverable { protected ClientToAMSecretManager clientToAMSecretManager = new ClientToAMSecretManager(); - protected ContainerTokenSecretManager containerTokenSecretManager; + protected RMContainerTokenSecretManager containerTokenSecretManager; protected ApplicationTokenSecretManager appTokenSecretManager; @@ -150,8 +150,6 @@ public synchronized void init(Configuration conf) { this.rmDispatcher); addService(this.containerAllocationExpirer); - this.containerTokenSecretManager = new ContainerTokenSecretManager(conf); - AMLivelinessMonitor amLivelinessMonitor = createAMLivelinessMonitor(); addService(amLivelinessMonitor); @@ -160,11 +158,14 @@ public synchronized void init(Configuration conf) { DelegationTokenRenewer tokenRenewer = createDelegationTokenRenewer(); addService(tokenRenewer); + + this.containerTokenSecretManager = new RMContainerTokenSecretManager(conf); - this.rmContext = new RMContextImpl(this.store, this.rmDispatcher, - this.containerAllocationExpirer, - 
amLivelinessMonitor, amFinishingMonitor, - tokenRenewer, this.appTokenSecretManager); + this.rmContext = + new RMContextImpl(this.store, this.rmDispatcher, + this.containerAllocationExpirer, amLivelinessMonitor, + amFinishingMonitor, tokenRenewer, this.appTokenSecretManager, + this.containerTokenSecretManager); // Register event handler for NodesListManager this.nodesListManager = new NodesListManager(this.rmContext); @@ -198,8 +199,7 @@ public synchronized void init(Configuration conf) { addService(resourceTracker); try { - this.scheduler.reinitialize(conf, - this.containerTokenSecretManager, this.rmContext); + this.scheduler.reinitialize(conf, this.rmContext); } catch (IOException ioe) { throw new RuntimeException("Failed to initialize scheduler", ioe); } @@ -486,6 +486,7 @@ public void start() { } this.appTokenSecretManager.start(); + this.containerTokenSecretManager.start(); startWepApp(); DefaultMetricsSystem.initialize("ResourceManager"); @@ -531,6 +532,7 @@ public void stop() { rmDTSecretManager.stopThreads(); this.appTokenSecretManager.stop(); + this.containerTokenSecretManager.stop(); /*synchronized(shutdown) { shutdown.set(true); @@ -616,7 +618,7 @@ public ApplicationACLsManager getApplicationACLsManager() { } @Private - public ContainerTokenSecretManager getContainerTokenSecretManager() { + public RMContainerTokenSecretManager getRMContainerTokenSecretManager() { return this.containerTokenSecretManager; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java index 06ad11ff1e..93d073079f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java @@ -18,9 +18,6 @@ package org.apache.hadoop.yarn.server.resourcemanager; import java.net.InetSocketAddress; -import java.nio.ByteBuffer; - -import javax.crypto.SecretKey; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -28,6 +25,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.Node; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Resource; @@ -42,6 +40,7 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse; import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse; +import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.api.records.NodeAction; import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.server.api.records.RegistrationResponse; @@ -52,8 +51,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeReconnectEvent; import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeStatusEvent; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; import org.apache.hadoop.yarn.service.AbstractService; import org.apache.hadoop.yarn.util.RackResolver; @@ -68,7 +67,7 @@ public class ResourceTrackerService extends AbstractService implements private final RMContext rmContext; private final NodesListManager nodesListManager; private final NMLivelinessMonitor nmLivelinessMonitor; - private final ContainerTokenSecretManager containerTokenSecretManager; + private final RMContainerTokenSecretManager containerTokenSecretManager; private Server server; private InetSocketAddress resourceTrackerAddress; @@ -93,7 +92,7 @@ public class ResourceTrackerService extends AbstractService implements public ResourceTrackerService(RMContext rmContext, NodesListManager nodesListManager, NMLivelinessMonitor nmLivelinessMonitor, - ContainerTokenSecretManager containerTokenSecretManager) { + RMContainerTokenSecretManager containerTokenSecretManager) { super(ResourceTrackerService.class.getName()); this.rmContext = rmContext; this.nodesListManager = nodesListManager; @@ -160,9 +159,6 @@ public RegisterNodeManagerResponse registerNodeManager( .newRecordInstance(RegisterNodeManagerResponse.class); RegistrationResponse regResponse = recordFactory .newRecordInstance(RegistrationResponse.class); - SecretKey secretKey = this.containerTokenSecretManager - .createAndGetSecretKey(nodeId.toString()); - regResponse.setSecretKey(ByteBuffer.wrap(secretKey.getEncoded())); // Check if this node is a 'valid' node if (!this.nodesListManager.isValidNode(host)) { @@ -173,8 +169,14 @@ public RegisterNodeManagerResponse registerNodeManager( return response; } + MasterKey nextMasterKeyForNode = null; + if (isSecurityEnabled()) { + nextMasterKeyForNode = this.containerTokenSecretManager.getCurrentKey(); + regResponse.setMasterKey(nextMasterKeyForNode); + } + RMNode rmNode = new RMNodeImpl(nodeId, rmContext, host, cmPort, httpPort, - resolve(host), capability); + resolve(host), capability, nextMasterKeyForNode); RMNode oldNode = this.rmContext.getRMNodes().putIfAbsent(nodeId, rmNode); if (oldNode == null) { @@ -236,7 +238,7 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) NodeHeartbeatResponse nodeHeartBeatResponse = recordFactory .newRecordInstance(NodeHeartbeatResponse.class); - + // 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat HeartbeatResponse lastHeartbeatResponse = rmNode.getLastHeartBeatResponse(); if (remoteNodeStatus.getResponseId() + 1 == lastHeartbeatResponse @@ -264,11 +266,32 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) latestResponse.addAllApplicationsToCleanup(rmNode.getAppsToCleanup()); latestResponse.setNodeAction(NodeAction.NORMAL); + MasterKey nextMasterKeyForNode = null; + + // Check if node's masterKey needs to be updated and if the currentKey has + // roller over, send it across + if (isSecurityEnabled()) { + boolean shouldSendMasterKey = false; + MasterKey nodeKnownMasterKey = rmNode.getCurrentMasterKey(); + nextMasterKeyForNode = this.containerTokenSecretManager.getNextKey(); + if (nextMasterKeyForNode != null) { + // nextMasterKeyForNode can be null if there is no outstanding key that + // is in the activation period. 
+ if (nodeKnownMasterKey.getKeyId() != nextMasterKeyForNode.getKeyId()) { + shouldSendMasterKey = true; + } + } + if (shouldSendMasterKey) { + latestResponse.setMasterKey(nextMasterKeyForNode); + } + } + // 4. Send status to RMNode, saving the latest response. this.rmContext.getDispatcher().getEventHandler().handle( new RMNodeStatusEvent(nodeId, remoteNodeStatus.getNodeHealthStatus(), remoteNodeStatus.getContainersStatuses(), - remoteNodeStatus.getKeepAliveApplications(), latestResponse)); + remoteNodeStatus.getKeepAliveApplications(), latestResponse, + nextMasterKeyForNode)); nodeHeartBeatResponse.setHeartbeatResponse(latestResponse); return nodeHeartBeatResponse; @@ -309,5 +332,8 @@ void refreshServiceAcls(Configuration configuration, PolicyProvider policyProvider) { this.server.refreshServiceAcl(configuration, policyProvider); } - + + protected boolean isSecurityEnabled() { + return UserGroupInformation.isSecurityEnabled(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java index 006e13e610..d642422508 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java @@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse; +import org.apache.hadoop.yarn.server.api.records.MasterKey; /** * Node managers information on available resources @@ -106,4 +107,6 @@ public interface RMNode { public List getAppsToCleanup(); public HeartbeatResponse getLastHeartBeatResponse(); -} \ No newline at end of file + + public MasterKey getCurrentMasterKey(); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java index 9a926e0ced..184a981dbb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java @@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse; +import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics; import org.apache.hadoop.yarn.server.resourcemanager.NodesListManagerEvent; import org.apache.hadoop.yarn.server.resourcemanager.NodesListManagerEventType; @@ -103,6 +104,8 @@ public class RMNodeImpl implements RMNode, EventHandler { private HeartbeatResponse latestHeartBeatResponse = 
recordFactory .newRecordInstance(HeartbeatResponse.class); + + private MasterKey currentMasterKey; private static final StateMachineFactory stateMachine; public RMNodeImpl(NodeId nodeId, RMContext context, String hostName, - int cmPort, int httpPort, Node node, Resource capability) { + int cmPort, int httpPort, Node node, Resource capability, + MasterKey masterKey) { this.nodeId = nodeId; this.context = context; this.hostName = hostName; @@ -165,6 +169,7 @@ public RMNodeImpl(NodeId nodeId, RMContext context, String hostName, this.nodeAddress = hostName + ":" + cmPort; this.httpAddress = hostName + ":" + httpPort; this.node = node; + this.currentMasterKey = masterKey; this.nodeHealthStatus.setIsNodeHealthy(true); this.nodeHealthStatus.setHealthReport("Healthy"); this.nodeHealthStatus.setLastHealthReportTime(System.currentTimeMillis()); @@ -298,6 +303,17 @@ public HeartbeatResponse getLastHeartBeatResponse() { this.readLock.unlock(); } } + + @Override + public MasterKey getCurrentMasterKey() { + this.readLock.lock(); + try { + return this.currentMasterKey; + } finally { + this.readLock.unlock(); + } + } + public void handle(RMNodeEvent event) { LOG.debug("Processing " + event.getNodeId() + " of type " + event.getType()); @@ -475,6 +491,7 @@ public NodeState transition(RMNodeImpl rmNode, RMNodeEvent event) { // Switch the last heartbeatresponse. rmNode.latestHeartBeatResponse = statusEvent.getLatestResponse(); + rmNode.currentMasterKey = statusEvent.getCurrentMasterKey(); NodeHealthStatus remoteNodeHealthStatus = statusEvent.getNodeHealthStatus(); @@ -539,6 +556,7 @@ public NodeState transition(RMNodeImpl rmNode, RMNodeEvent event) { // HeartBeat processing from our end is done, as node pulls the following // lists before sending status-updates. Clear data-structures + // TODO: These lists could go to the NM multiple times, or never. rmNode.containersToClean.clear(); rmNode.finishedApplications.clear(); @@ -555,6 +573,7 @@ public NodeState transition(RMNodeImpl rmNode, RMNodeEvent event) { // Switch the last heartbeatresponse. 
rmNode.latestHeartBeatResponse = statusEvent.getLatestResponse(); + rmNode.currentMasterKey = statusEvent.getCurrentMasterKey(); NodeHealthStatus remoteNodeHealthStatus = statusEvent.getNodeHealthStatus(); rmNode.setNodeHealthStatus(remoteNodeHealthStatus); if (remoteNodeHealthStatus.getIsNodeHealthy()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java index 1285c2bed9..36c877d9e0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java @@ -25,6 +25,7 @@ import org.apache.hadoop.yarn.api.records.NodeHealthStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse; +import org.apache.hadoop.yarn.server.api.records.MasterKey; public class RMNodeStatusEvent extends RMNodeEvent { @@ -32,15 +33,17 @@ public class RMNodeStatusEvent extends RMNodeEvent { private final List containersCollection; private final HeartbeatResponse latestResponse; private final List keepAliveAppIds; + private final MasterKey currentMasterKey; public RMNodeStatusEvent(NodeId nodeId, NodeHealthStatus nodeHealthStatus, List collection, List keepAliveAppIds, - HeartbeatResponse latestResponse) { + HeartbeatResponse latestResponse, MasterKey currentMasterKey) { super(nodeId, RMNodeEventType.STATUS_UPDATE); this.nodeHealthStatus = nodeHealthStatus; this.containersCollection = collection; this.keepAliveAppIds = keepAliveAppIds; this.latestResponse = latestResponse; + this.currentMasterKey = currentMasterKey; } public NodeHealthStatus getNodeHealthStatus() { @@ -58,4 +61,8 @@ public HeartbeatResponse getLatestResponse() { public List getKeepAliveAppIds() { return this.keepAliveAppIds; } + + public MasterKey getCurrentMasterKey() { + return this.currentMasterKey; + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java index 0d8e563f28..88408810ce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java @@ -25,7 +25,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; /** * This interface is the one implemented by the schedulers. 
It mainly extends @@ -38,9 +37,7 @@ public interface ResourceScheduler extends YarnScheduler, Recoverable { /** * Re-initialize the ResourceScheduler. * @param conf configuration - * @param secretManager token-secret manager * @throws IOException */ - void reinitialize(Configuration conf, - ContainerTokenSecretManager secretManager, RMContext rmContext) throws IOException; + void reinitialize(Configuration conf, RMContext rmContext) throws IOException; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 8028e133ee..df01f59f4c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -35,7 +35,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.Lock; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Container; @@ -46,6 +45,7 @@ import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger; import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants; @@ -75,7 +75,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; @LimitedPrivate("yarn") @Evolving @@ -126,7 +126,6 @@ public Configuration getConf() { private CapacitySchedulerConfiguration conf; private YarnConfiguration yarnConf; - private ContainerTokenSecretManager containerTokenSecretManager; private RMContext rmContext; private Map queues = new ConcurrentHashMap(); @@ -163,8 +162,8 @@ public CapacitySchedulerConfiguration getConfiguration() { } @Override - public ContainerTokenSecretManager getContainerTokenSecretManager() { - return containerTokenSecretManager; + public RMContainerTokenSecretManager getContainerTokenSecretManager() { + return this.rmContext.getContainerTokenSecretManager(); } @Override @@ -193,14 +192,12 @@ public Resource getClusterResources() { } @Override - public synchronized void reinitialize(Configuration conf, - ContainerTokenSecretManager containerTokenSecretManager, RMContext rmContext) - throws IOException { + public synchronized void + reinitialize(Configuration conf, RMContext 
rmContext) throws IOException { if (!initialized) { this.conf = new CapacitySchedulerConfiguration(conf); this.minimumAllocation = this.conf.getMinimumAllocation(); this.maximumAllocation = this.conf.getMaximumAllocation(); - this.containerTokenSecretManager = containerTokenSecretManager; this.rmContext = rmContext; initializeQueues(this.conf); initialized = true; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerContext.java index ef14ab164a..861d630af6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerContext.java @@ -21,7 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; /** * Read-only interface to {@link CapacityScheduler} context. @@ -33,7 +33,7 @@ public interface CapacitySchedulerContext { Resource getMaximumResourceCapability(); - ContainerTokenSecretManager getContainerTokenSecretManager(); + RMContainerTokenSecretManager getContainerTokenSecretManager(); int getNumClusterNodes(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index cf303cba8f..1a792b82c5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -64,7 +64,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.util.BuilderUtils; @Private @@ -104,7 +104,7 @@ public class LeafQueue implements CSQueue { private final Resource maximumAllocation; private final float minimumAllocationFactor; - private ContainerTokenSecretManager containerTokenSecretManager; + private RMContainerTokenSecretManager containerTokenSecretManager; private Map users = new HashMap(); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java index 7eb3be2d02..7b46d846d2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java @@ -37,7 +37,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.util.BuilderUtils; @Private @@ -51,7 +51,7 @@ public class AppSchedulable extends Schedulable { private static RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); private static final Log LOG = LogFactory.getLog(AppSchedulable.class); private FSQueue queue; - private ContainerTokenSecretManager containerTokenSecretManager; + private RMContainerTokenSecretManager containerTokenSecretManager; public AppSchedulable(FairScheduler scheduler, FSSchedulerApp app, FSQueue queue) { this.scheduler = scheduler; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index 46ed93724e..e102622d2b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -73,7 +73,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; @LimitedPrivate("yarn") @Unstable @@ -82,7 +82,6 @@ public class FairScheduler implements ResourceScheduler { private boolean initialized; private FairSchedulerConfiguration conf; - private ContainerTokenSecretManager containerTokenSecretManager; private RMContext rmContext; private Resource minimumAllocation; private Resource maximumAllocation; @@ -413,8 +412,8 @@ private void updateRunnability() { } } - public ContainerTokenSecretManager getContainerTokenSecretManager() { - return this.containerTokenSecretManager; + public RMContainerTokenSecretManager 
getContainerTokenSecretManager() { + return this.rmContext.getContainerTokenSecretManager(); } public double getAppWeight(AppSchedulable app) { @@ -892,15 +891,11 @@ public void recover(RMState state) throws Exception { } @Override - public synchronized void reinitialize(Configuration conf, - ContainerTokenSecretManager containerTokenSecretManager, - RMContext rmContext) - throws IOException - { + public synchronized void + reinitialize(Configuration conf, RMContext rmContext) throws IOException { if (!this.initialized) { this.conf = new FairSchedulerConfiguration(conf); this.rootMetrics = QueueMetrics.forQueue("root", null, true, conf); - this.containerTokenSecretManager = containerTokenSecretManager; this.rmContext = rmContext; this.clock = new SystemClock(); this.eventLog = new FairSchedulerEventLog(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java index f06207bc2b..aebf989a6e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java @@ -83,7 +83,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; import org.apache.hadoop.yarn.util.BuilderUtils; @LimitedPrivate("yarn") @@ -97,7 +96,6 @@ public class FifoScheduler implements ResourceScheduler, Configurable { RecordFactoryProvider.getRecordFactory(null); Configuration conf; - private ContainerTokenSecretManager containerTokenSecretManager; private final static Container[] EMPTY_CONTAINER_ARRAY = new Container[] {}; private final static List EMPTY_CONTAINER_LIST = Arrays.asList(EMPTY_CONTAINER_ARRAY); @@ -193,14 +191,11 @@ public Resource getMaximumResourceCapability() { } @Override - public synchronized void reinitialize(Configuration conf, - ContainerTokenSecretManager containerTokenSecretManager, - RMContext rmContext) - throws IOException + public synchronized void + reinitialize(Configuration conf, RMContext rmContext) throws IOException { setConf(conf); if (!this.initialized) { - this.containerTokenSecretManager = containerTokenSecretManager; this.rmContext = rmContext; this.minimumAllocation = Resources.createResource(conf.getInt( @@ -543,8 +538,8 @@ private int assignContainer(FiCaSchedulerNode node, FiCaSchedulerApp application // If security is enabled, send the container-tokens too. if (UserGroupInformation.isSecurityEnabled()) { containerToken = - containerTokenSecretManager.createContainerToken(containerId, - nodeId, capability); + this.rmContext.getContainerTokenSecretManager() + .createContainerToken(containerId, nodeId, capability); if (containerToken == null) { return i; // Try again later. 
} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java new file mode 100644 index 0000000000..467b4ad5e7 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java @@ -0,0 +1,153 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.server.resourcemanager.security; + +import java.util.Timer; +import java.util.TimerTask; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.api.records.MasterKey; +import org.apache.hadoop.yarn.server.security.BaseContainerTokenSecretManager; + +/** + * SecretManager for ContainerTokens. This is RM-specific and rolls the + * master-keys every so often. + * + */ +public class RMContainerTokenSecretManager extends + BaseContainerTokenSecretManager { + + private static Log LOG = LogFactory + .getLog(RMContainerTokenSecretManager.class); + + private MasterKeyData nextMasterKey; + + private final Timer timer; + private final long rollingInterval; + private final long activationDelay; + + public RMContainerTokenSecretManager(Configuration conf) { + super(conf); + + this.timer = new Timer(); + this.rollingInterval = conf.getLong( + YarnConfiguration.RM_CONTAINER_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, + YarnConfiguration.DEFAULT_RM_CONTAINER_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS) * 1000; + // Add an activation delay. This is to address the following race: RM may + // roll over master-key, scheduling may happen at some point of time, a + // container created with a password generated off new master key, but NM + // might not have come again to RM to update the shared secret: so AM has a + // valid password generated off new secret but NM doesn't know about the + // secret yet. + // Adding delay = 1.5 * expiry interval makes sure that all active NMs get + // the updated shared-key. 
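A minimal, self-contained sketch (not part of the patch; the class name and interval values below are hypothetical) of the timing rule the comment above describes: the activation delay is 1.5x the NM expiry interval, and the key-rolling interval is expected to be more than twice that delay, so every live NM heartbeats at least once before a freshly rolled key becomes active.

// Timing-rule sketch: activationDelay = 1.5 * NM expiry interval, and the
// key-rolling interval must be more than twice the activation delay.
public final class KeyRollingTimingSketch {
  public static void main(String[] args) {
    long nmExpiryIntervalMs = 10L * 60 * 1000;        // assumed NM expiry: 10 minutes
    long rollingIntervalMs = 24L * 60 * 60 * 1000;    // assumed rolling interval: 1 day
    long activationDelayMs = (long) (nmExpiryIntervalMs * 1.5);
    if (rollingIntervalMs <= 2 * activationDelayMs) {
      throw new IllegalArgumentException(
          "rolling interval must be more than 2 x the activation delay");
    }
    System.out.println("activationDelayMs = " + activationDelayMs);
  }
}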
+ this.activationDelay = + (long) (conf.getLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, + YarnConfiguration.DEFAULT_RM_NM_EXPIRY_INTERVAL_MS) * 1.5); + LOG.info("ContainerTokenKeyRollingInterval: " + this.rollingInterval + + "ms and ContainerTokenKeyActivationDelay: " + this.activationDelay + + "ms"); + if (rollingInterval <= activationDelay * 2) { + throw new IllegalArgumentException( + YarnConfiguration.RM_CONTAINER_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS + + " should be more than 2 X " + + YarnConfiguration.RM_CONTAINER_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS); + } + } + + public void start() { + rollMasterKey(); + this.timer.scheduleAtFixedRate(new MasterKeyRoller(), rollingInterval, + rollingInterval); + } + + public void stop() { + this.timer.cancel(); + } + + /** + * Creates a new master-key and sets it as the primary. + */ + @Private + protected void rollMasterKey() { + super.writeLock.lock(); + try { + LOG.info("Rolling master-key for container-tokens"); + if (this.currentMasterKey == null) { // Setting up for the first time. + this.currentMasterKey = createNewMasterKey(); + } else { + this.nextMasterKey = createNewMasterKey(); + this.timer.schedule(new NextKeyActivator(), this.activationDelay); + } + } finally { + super.writeLock.unlock(); + } + } + + @Private + public MasterKey getNextKey() { + super.readLock.lock(); + try { + if (this.nextMasterKey == null) { + return null; + } else { + return this.nextMasterKey.getMasterKey(); + } + } finally { + super.readLock.unlock(); + } + } + + /** + * Activate the new master-key + */ + @Private + protected void activateNextMasterKey() { + super.writeLock.lock(); + try { + LOG.info("Activating next master key with id: " + + this.nextMasterKey.getMasterKey().getKeyId()); + this.currentMasterKey = this.nextMasterKey; + this.nextMasterKey = null; + } finally { + super.writeLock.unlock(); + } + } + + private class MasterKeyRoller extends TimerTask { + @Override + public void run() { + rollMasterKey(); + } + } + + private class NextKeyActivator extends TimerTask { + @Override + public void run() { + // Activation will happen after an absolute time interval. It will be good + // if we can force activation after an NM updates and acknowledges a + // roll-over. But that is only possible when we move to per-NM keys. 
TODO: + activateNextMasterKey(); + } + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java index f1172e20de..2de035ccbd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java @@ -30,6 +30,7 @@ import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse; +import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import com.google.common.collect.Lists; @@ -187,6 +188,11 @@ public List getAppsToCleanup() { public HeartbeatResponse getLastHeartBeatResponse() { return null; } + + @Override + public MasterKey getCurrentMasterKey() { + return null; + } }; private static RMNode buildRMNode(int rack, final Resource perNode, NodeState state, String httpAddr) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java index 1133d56871..25dc899ea5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java @@ -95,7 +95,7 @@ public static RMContext mockRMContext(int n, long time) { rmDispatcher); return new RMContextImpl(new MemStore(), rmDispatcher, containerAllocationExpirer, amLivelinessMonitor, amFinishingMonitor, - null, null) { + null, null, null) { @Override public ConcurrentMap getRMApps() { return map; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java index 36aa4502d7..d291b387dc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java @@ -19,7 +19,6 @@ package org.apache.hadoop.yarn.server.resourcemanager; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import junit.framework.Assert; @@ -39,12 +38,10 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent; @@ -201,7 +198,7 @@ public void testReconnectedNode() throws Exception { conf.setQueues("default", new String[] {"default"}); conf.setCapacity("default", 100); FifoScheduler fs = new FifoScheduler(); - fs.reinitialize(conf, null, null); + fs.reinitialize(conf, null); RMNode n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1); RMNode n2 = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 2); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java index 4f6aaddb4b..b717ceeeb8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java @@ -17,29 +17,23 @@ */ package org.apache.hadoop.yarn.server.resourcemanager; +import static junit.framework.Assert.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + import java.net.InetAddress; import java.net.InetSocketAddress; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.TestRPC.TestImpl; import org.apache.hadoop.ipc.TestRPC.TestProtocol; +import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; -import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger; -import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants; import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.Keys; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; - -import org.apache.hadoop.net.NetUtils; - -import static org.mockito.Mockito.*; -import static junit.framework.Assert.*; -import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -48,7 +42,6 @@ * Tests {@link RMAuditLogger}. 
*/ public class TestRMAuditLogger { - private static final Log LOG = LogFactory.getLog(TestRMAuditLogger.class); private static final String USER = "test"; private static final String OPERATION = "oper"; private static final String TARGET = "tgt"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java index b177c9709c..bee7a39579 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java @@ -78,7 +78,7 @@ public void setUp() throws Exception { rmContext = new RMContextImpl(new MemStore(), rmDispatcher, null, null, null, - mock(DelegationTokenRenewer.class), null); + mock(DelegationTokenRenewer.class), null, null); scheduler = mock(YarnScheduler.class); doAnswer( new Answer() { @@ -102,7 +102,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { new TestSchedulerEventDispatcher()); NodeId nodeId = BuilderUtils.newNodeId("localhost", 0); - node = new RMNodeImpl(nodeId, rmContext, null, 0, 0, null, null); + node = new RMNodeImpl(nodeId, rmContext, null, 0, 0, null, null, null); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java index ec323ea59c..03d394af94 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java @@ -62,7 +62,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; import org.apache.hadoop.yarn.server.resourcemanager.security.ApplicationTokenSecretManager; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; +import org.apache.hadoop.yarn.server.security.BaseContainerTokenSecretManager; import org.junit.After; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java index 3780617eda..f495ada338 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java @@ -52,7 +52,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; +import org.apache.hadoop.yarn.server.security.BaseContainerTokenSecretManager; import org.junit.After; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java index f03f051b5c..06d0682769 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java @@ -37,12 +37,12 @@ import org.apache.hadoop.yarn.server.resourcemanager.NodesListManager; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; -import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.NodeEventDispatcher; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService; import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.util.Records; import org.junit.Before; import org.junit.Test; @@ -71,7 +71,7 @@ public void setUp() { // Dispatcher that processes events inline Dispatcher dispatcher = new InlineDispatcher(); RMContext context = new RMContextImpl(new MemStore(), dispatcher, null, - null, null, null, null); + null, null, null, null, null); dispatcher.register(SchedulerEventType.class, new InlineDispatcher.EmptyEventHandler()); dispatcher.register(RMNodeEventType.class, @@ -82,8 +82,9 @@ public void setUp() { nmLivelinessMonitor.start(); NodesListManager nodesListManager = new NodesListManager(context); nodesListManager.init(conf); - ContainerTokenSecretManager containerTokenSecretManager = - new ContainerTokenSecretManager(conf); + RMContainerTokenSecretManager containerTokenSecretManager = + new RMContainerTokenSecretManager(conf); + containerTokenSecretManager.start(); resourceTrackerService = new ResourceTrackerService(context, nodesListManager, nmLivelinessMonitor, 
containerTokenSecretManager); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java index 0a4e1cba9c..384a1140fa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java @@ -42,13 +42,14 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.util.Records; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; +@SuppressWarnings("rawtypes") public class TestRMNMRPCResponseId { private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); ResourceTrackerService resourceTrackerService; @@ -66,14 +67,14 @@ public void handle(Event event) { }); RMContext context = new RMContextImpl(new MemStore(), dispatcher, null, null, null, - null, null); + null, null, null); dispatcher.register(RMNodeEventType.class, new ResourceManager.NodeEventDispatcher(context)); NodesListManager nodesListManager = new NodesListManager(context); Configuration conf = new Configuration(); nodesListManager.init(conf); - ContainerTokenSecretManager containerTokenSecretManager = - new ContainerTokenSecretManager(conf); + RMContainerTokenSecretManager containerTokenSecretManager = + new RMContainerTokenSecretManager(conf); resourceTrackerService = new ResourceTrackerService(context, nodesListManager, new NMLivelinessMonitor(dispatcher), containerTokenSecretManager); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java index 846455ac0d..c8182596db 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java @@ -49,6 +49,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; import org.apache.hadoop.yarn.server.resourcemanager.security.ApplicationTokenSecretManager; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import 
org.junit.Before; import org.junit.Test; @@ -119,9 +120,11 @@ public void setUp() throws Exception { mock(ContainerAllocationExpirer.class); AMLivelinessMonitor amLivelinessMonitor = mock(AMLivelinessMonitor.class); AMLivelinessMonitor amFinishingMonitor = mock(AMLivelinessMonitor.class); - this.rmContext = new RMContextImpl(new MemStore(), rmDispatcher, - containerAllocationExpirer, amLivelinessMonitor, amFinishingMonitor, - null, new ApplicationTokenSecretManager(conf)); + this.rmContext = + new RMContextImpl(new MemStore(), rmDispatcher, + containerAllocationExpirer, amLivelinessMonitor, amFinishingMonitor, + null, new ApplicationTokenSecretManager(conf), + new RMContainerTokenSecretManager(conf)); rmDispatcher.register(RMAppAttemptEventType.class, new TestApplicationAttemptEventDispatcher(this.rmContext)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java index 7d3d207fac..c3da2c3928 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java @@ -72,6 +72,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType; import org.apache.hadoop.yarn.server.resourcemanager.security.ApplicationTokenSecretManager; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.util.BuilderUtils; import org.junit.After; import org.junit.Before; @@ -154,9 +155,12 @@ public void setUp() throws Exception { mock(ContainerAllocationExpirer.class); amLivelinessMonitor = mock(AMLivelinessMonitor.class); amFinishingMonitor = mock(AMLivelinessMonitor.class); - rmContext = new RMContextImpl(new MemStore(), rmDispatcher, - containerAllocationExpirer, amLivelinessMonitor, amFinishingMonitor, - null, new ApplicationTokenSecretManager(new Configuration())); + Configuration conf = new Configuration(); + rmContext = + new RMContextImpl(new MemStore(), rmDispatcher, + containerAllocationExpirer, amLivelinessMonitor, amFinishingMonitor, + null, new ApplicationTokenSecretManager(conf), + new RMContainerTokenSecretManager(conf)); scheduler = mock(YarnScheduler.class); masterService = mock(ApplicationMasterService.class); @@ -174,7 +178,7 @@ public void setUp() throws Exception { rmDispatcher.register(AMLauncherEventType.class, new TestAMLauncherEventDispatcher()); - rmDispatcher.init(new Configuration()); + rmDispatcher.init(conf); rmDispatcher.start(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index 
d3ec0357bf..0a4affed44 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.server.resourcemanager.Application; import org.apache.hadoop.yarn.server.resourcemanager.MockNodes; +import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.Task; import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store; @@ -44,6 +45,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -247,12 +249,13 @@ public void testRefreshQueues() throws Exception { CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration(); setupQueueConfiguration(conf); cs.setConf(new YarnConfiguration()); - cs.reinitialize(conf, null, null); + cs.reinitialize(conf, new RMContextImpl(null, null, null, null, null, null, + null, new RMContainerTokenSecretManager(conf))); checkQueueCapacities(cs, A_CAPACITY, B_CAPACITY); conf.setCapacity(A, 80f); conf.setCapacity(B, 20f); - cs.reinitialize(conf, null,null); + cs.reinitialize(conf,null); checkQueueCapacities(cs, 80f, 20f); } @@ -343,7 +346,8 @@ public void testParseQueue() throws IOException { conf.setCapacity(CapacitySchedulerConfiguration.ROOT + ".a.a1.b1", 100.0f); conf.setUserLimitFactor(CapacitySchedulerConfiguration.ROOT + ".a.a1.b1", 100.0f); - cs.reinitialize(conf, null, null); + cs.reinitialize(conf, new RMContextImpl(null, null, null, null, null, null, + null, new RMContainerTokenSecretManager(conf))); } @Test @@ -353,7 +357,8 @@ public void testReconnectedNode() throws Exception { setupQueueConfiguration(csConf); CapacityScheduler cs = new CapacityScheduler(); cs.setConf(new YarnConfiguration()); - cs.reinitialize(csConf, null, null); + cs.reinitialize(csConf, new RMContextImpl(null, null, null, null, null, null, + null, new RMContainerTokenSecretManager(csConf))); RMNode n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1); RMNode n2 = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 2); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index 9248206c7f..182547325f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -120,7 +120,7 @@ public void setUp() throws Exception { CapacityScheduler.applicationComparator, TestUtils.spyHook); - cs.reinitialize(csConf, null, rmContext); + cs.reinitialize(csConf, rmContext); } private static final String A = "a"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java index 8cb5b07b65..4008c9499e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java @@ -23,8 +23,8 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.junit.Test; public class TestQueueParsing { @@ -42,7 +42,8 @@ public void testQueueParsing() throws Exception { CapacityScheduler capacityScheduler = new CapacityScheduler(); capacityScheduler.setConf(conf); - capacityScheduler.reinitialize(conf, null, null); + capacityScheduler.reinitialize(conf, new RMContextImpl(null, null, null, + null, null, null, null, new RMContainerTokenSecretManager(conf))); CSQueue a = capacityScheduler.getQueue("a"); Assert.assertEquals(0.10, a.getAbsoluteCapacity(), DELTA); @@ -138,7 +139,7 @@ public void testRootQueueParsing() throws Exception { CapacityScheduler capacityScheduler = new CapacityScheduler(); capacityScheduler.setConf(new YarnConfiguration()); - capacityScheduler.reinitialize(conf, null, null); + capacityScheduler.reinitialize(conf, null); } public void testMaxCapacity() throws Exception { @@ -161,7 +162,7 @@ public void testMaxCapacity() throws Exception { try { capacityScheduler = new CapacityScheduler(); capacityScheduler.setConf(new YarnConfiguration()); - capacityScheduler.reinitialize(conf, null, null); + capacityScheduler.reinitialize(conf, null); } catch (IllegalArgumentException iae) { fail = true; } @@ -173,7 +174,7 @@ public void testMaxCapacity() throws Exception { // Now this should work capacityScheduler = new CapacityScheduler(); capacityScheduler.setConf(new YarnConfiguration()); - capacityScheduler.reinitialize(conf, null, null); + capacityScheduler.reinitialize(conf, null); fail = false; try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java index 5e146843ef..2765816749 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java @@ -47,6 +47,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.security.ApplicationTokenSecretManager; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; public class TestUtils { private static final Log LOG = LogFactory.getLog(TestUtils.class); @@ -79,9 +80,11 @@ public EventHandler getEventHandler() { ContainerAllocationExpirer cae = new ContainerAllocationExpirer(nullDispatcher); + Configuration conf = new Configuration(); RMContext rmContext = new RMContextImpl(null, nullDispatcher, cae, null, null, null, - new ApplicationTokenSecretManager(new Configuration())); + new ApplicationTokenSecretManager(conf), + new RMContainerTokenSecretManager(conf)); return rmContext; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index 2b8d6cc029..8419eb4249 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -100,7 +100,7 @@ public void setUp() throws IOException { resourceManager = new ResourceManager(store); resourceManager.init(conf); ((AsyncDispatcher)resourceManager.getRMContext().getDispatcher()).start(); - scheduler.reinitialize(conf, null, resourceManager.getRMContext()); + scheduler.reinitialize(conf, resourceManager.getRMContext()); } @After @@ -280,7 +280,7 @@ public void testSimpleContainerReservation() throws InterruptedException { public void testUserAsDefaultQueue() throws Exception { Configuration conf = new Configuration(); conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, "true"); - scheduler.reinitialize(conf, null, resourceManager.getRMContext()); + scheduler.reinitialize(conf, resourceManager.getRMContext()); AppAddedSchedulerEvent appAddedEvent = new AppAddedSchedulerEvent( createAppAttemptId(1, 1), "default", "user1"); scheduler.handle(appAddedEvent); @@ -288,7 +288,7 @@ public void testUserAsDefaultQueue() throws Exception { assertEquals(0, scheduler.getQueueManager().getQueue("default").getApplications().size()); conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, "false"); - scheduler.reinitialize(conf, null, resourceManager.getRMContext()); + scheduler.reinitialize(conf, 
resourceManager.getRMContext()); AppAddedSchedulerEvent appAddedEvent2 = new AppAddedSchedulerEvent( createAppAttemptId(2, 1), "default", "user2"); scheduler.handle(appAddedEvent2); @@ -301,7 +301,7 @@ public void testUserAsDefaultQueue() throws Exception { public void testFairShareWithMinAlloc() throws Exception { Configuration conf = new Configuration(); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); - scheduler.reinitialize(conf, null, resourceManager.getRMContext()); + scheduler.reinitialize(conf, resourceManager.getRMContext()); PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); @@ -407,7 +407,7 @@ public void testAppAdditionAndRemoval() throws Exception { public void testAllocationFileParsing() throws Exception { Configuration conf = new Configuration(); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); - scheduler.reinitialize(conf, null, resourceManager.getRMContext()); + scheduler.reinitialize(conf, resourceManager.getRMContext()); PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); @@ -510,7 +510,7 @@ public void testAllocationFileParsing() throws Exception { public void testBackwardsCompatibleAllocationFileParsing() throws Exception { Configuration conf = new Configuration(); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); - scheduler.reinitialize(conf, null, resourceManager.getRMContext()); + scheduler.reinitialize(conf, resourceManager.getRMContext()); PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); @@ -613,7 +613,7 @@ public void testBackwardsCompatibleAllocationFileParsing() throws Exception { public void testIsStarvedForMinShare() throws Exception { Configuration conf = new Configuration(); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); - scheduler.reinitialize(conf, null, resourceManager.getRMContext()); + scheduler.reinitialize(conf, resourceManager.getRMContext()); PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); @@ -672,7 +672,7 @@ else if (p.getName().equals("queueB")) { public void testIsStarvedForFairShare() throws Exception { Configuration conf = new Configuration(); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); - scheduler.reinitialize(conf, null, resourceManager.getRMContext()); + scheduler.reinitialize(conf, resourceManager.getRMContext()); PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); @@ -736,7 +736,7 @@ else if (p.getName().equals("queueB")) { public void testChoiceOfPreemptedContainers() throws Exception { Configuration conf = new Configuration(); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE + ".allocation.file", ALLOC_FILE); - scheduler.reinitialize(conf, null, resourceManager.getRMContext()); + scheduler.reinitialize(conf, resourceManager.getRMContext()); PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); @@ -871,7 +871,7 @@ public void testPreemptionDecision() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); MockClock clock = new MockClock(); scheduler.setClock(clock); - scheduler.reinitialize(conf, null, resourceManager.getRMContext()); + scheduler.reinitialize(conf, resourceManager.getRMContext()); PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java index 25c10d1798..317f8923e8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java @@ -87,10 +87,10 @@ public void testFifoSchedulerCapacityWhenNoNMs() { public void testAppAttemptMetrics() throws Exception { AsyncDispatcher dispatcher = new InlineDispatcher(); RMContext rmContext = new RMContextImpl(null, dispatcher, null, - null, null, null, null); + null, null, null, null, null); FifoScheduler schedular = new FifoScheduler(); - schedular.reinitialize(new Configuration(), null, rmContext); + schedular.reinitialize(new Configuration(), rmContext); ApplicationId appId = BuilderUtils.newApplicationId(200, 1); ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java index fdaf948a22..c9ca663f04 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java @@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.util.StringHelper; import org.apache.hadoop.yarn.webapp.WebApps; @@ -159,7 +160,7 @@ public static RMContext mockRMContext(int numApps, int racks, int numNodes, deactivatedNodesMap.put(node.getHostName(), node); } return new RMContextImpl(new MemStore(), null, null, null, null, - null, null) { + null, null, null) { @Override public ConcurrentMap getRMApps() { return applicationsMaps; @@ -199,7 +200,8 @@ public static CapacityScheduler mockCapacityScheduler() throws IOException { CapacityScheduler cs = new CapacityScheduler(); cs.setConf(new YarnConfiguration()); - cs.reinitialize(conf, null, null); + cs.reinitialize(conf, new RMContextImpl(null, null, null, null, null, null, + null, new RMContainerTokenSecretManager(conf))); return cs; } @@ -275,7 +277,7 @@ public static FifoScheduler mockFifoScheduler() throws Exception { FifoScheduler fs = new FifoScheduler(); fs.setConf(new YarnConfiguration()); - 
fs.reinitialize(conf, null, null); + fs.reinitialize(conf, null); return fs; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java index cb4a3af5f5..14fc685c5b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java @@ -145,7 +145,7 @@ public void testNodesDefaultWithUnHealthyNode() throws JSONException, nodeHealth.setHealthReport("test health report"); nodeHealth.setIsNodeHealthy(false); node.handle(new RMNodeStatusEvent(nm3.getNodeId(), nodeHealth, - new ArrayList(), null, null)); + new ArrayList(), null, null, null)); rm.NMwaitForState(nm3.getNodeId(), NodeState.UNHEALTHY); ClientResponse response = @@ -360,7 +360,7 @@ public void testNodesQueryHealthyAndState() throws JSONException, Exception { nodeHealth.setHealthReport("test health report"); nodeHealth.setIsNodeHealthy(false); node.handle(new RMNodeStatusEvent(nm1.getNodeId(), nodeHealth, - new ArrayList(), null, null)); + new ArrayList(), null, null, null)); rm.NMwaitForState(nm1.getNodeId(), NodeState.UNHEALTHY); ClientResponse response = r.path("ws").path("v1").path("cluster") diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java index 588512420f..1a0885f43b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java @@ -21,7 +21,6 @@ import java.io.File; import java.io.IOException; import java.net.InetAddress; -import java.net.ServerSocket; import java.net.UnknownHostException; import org.apache.commons.logging.Log; @@ -51,7 +50,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService; import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store; import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory; -import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; import org.apache.hadoop.yarn.service.AbstractService; import org.apache.hadoop.yarn.service.CompositeService; @@ -290,10 +288,9 @@ protected void doSecureLogin() throws IOException { @Override protected NodeStatusUpdater createNodeStatusUpdater(Context context, - Dispatcher dispatcher, NodeHealthCheckerService healthChecker, - ContainerTokenSecretManager containerTokenSecretManager) { + Dispatcher dispatcher, NodeHealthCheckerService healthChecker) { return new NodeStatusUpdaterImpl(context, dispatcher, - healthChecker, metrics, containerTokenSecretManager) { + healthChecker, metrics) { @Override protected ResourceTracker getRMClient() { final ResourceTrackerService rt = resourceManager diff 
--git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java index 1d731cfd97..527d4ff40f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java @@ -46,6 +46,7 @@ import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.SecurityUtil; @@ -86,6 +87,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.security.ApplicationTokenSecretManager; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.util.BuilderUtils; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.Records; @@ -220,7 +222,7 @@ public void testMaliceUser() throws IOException, InterruptedException { Resource modifiedResource = BuilderUtils.newResource(2048); ContainerTokenIdentifier modifiedIdentifier = new ContainerTokenIdentifier( dummyIdentifier.getContainerID(), dummyIdentifier.getNmHostAddress(), - modifiedResource, Long.MAX_VALUE); + modifiedResource, Long.MAX_VALUE, 0); Token modifiedToken = new Token( modifiedIdentifier.getBytes(), containerToken.getPassword().array(), new Text(containerToken.getKind()), new Text(containerToken @@ -250,12 +252,17 @@ public Void run() { Assert.assertEquals( java.lang.reflect.UndeclaredThrowableException.class .getCanonicalName(), e.getClass().getCanonicalName()); + Assert.assertEquals(RemoteException.class.getCanonicalName(), e + .getCause().getClass().getCanonicalName()); + Assert.assertEquals( + "org.apache.hadoop.security.token.SecretManager$InvalidToken", + ((RemoteException) e.getCause()).getClassName()); Assert.assertTrue(e - .getCause() - .getMessage() - .contains( - "DIGEST-MD5: digest response format violation. " - + "Mismatched response.")); + .getCause() + .getMessage() + .matches( + "Given Container container_\\d*_\\d*_\\d\\d_\\d*" + + " seems to have an illegally generated token.")); } return null; } @@ -331,12 +338,15 @@ public ContainerManager run() { unauthorizedUser = UserGroupInformation .createRemoteUser(containerID.toString()); + RMContainerTokenSecretManager containerTokenSecreteManager = + resourceManager.getRMContainerTokenSecretManager(); final ContainerTokenIdentifier newTokenId = new ContainerTokenIdentifier(tokenId.getContainerID(), tokenId.getNmHostAddress(), tokenId.getResource(), - System.currentTimeMillis() - 1); + System.currentTimeMillis() - 1, + containerTokenSecreteManager.getCurrentKey().getKeyId()); byte[] passowrd = - resourceManager.getContainerTokenSecretManager().createPassword( + containerTokenSecreteManager.createPassword( newTokenId); // Create a valid token by using the key from the RM. 
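(For context on the assertions above: under the scheme this patch introduces, a ContainerTokenIdentifier carries the id of the master key that signed it, and the NM recomputes the password from its copy of that key. That is why the test stamps the identifier with the RM's current key id and asks the RM-side secret manager for a matching password; without both, the token built below would be rejected as illegally generated before any further checks run.)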
token = new Token( From 56285e1beea269c66b6d89fcd6610eaa25e38ce6 Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Sun, 26 Aug 2012 00:31:12 +0000 Subject: [PATCH 05/62] HDFS-3683. Edit log replay progress indicator shows >100% complete. Contributed by Plamen Jeliazkov. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1377367 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java | 5 +++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 0f4c5263d1..b321357821 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -679,6 +679,9 @@ Branch-2 ( Unreleased changes ) HDFS-3731. 2.0 release upgrade must handle blocks being written from 1.0. (Colin Patrick McCabe via eli) + HDFS-3683. Edit log replay progress indicator shows >100% complete. (Plamen + Jeliazkov via atm) + BREAKDOWN OF HDFS-3042 SUBTASKS HDFS-2185. HDFS portion of ZK-based FailoverController (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index ad7f71cfe9..78b28a9588 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -191,8 +191,9 @@ long loadEditRecords(EditLogInputStream in, boolean closeOnExit, if (op.hasTransactionId()) { long now = now(); if (now - lastLogTime > REPLAY_TRANSACTION_LOG_INTERVAL) { - int percent = Math.round((float)lastAppliedTxId / numTxns * 100); - LOG.info("replaying edit log: " + lastAppliedTxId + "/" + numTxns + long deltaTxId = lastAppliedTxId - expectedStartingTxId + 1; + int percent = Math.round((float) deltaTxId / numTxns * 100); + LOG.info("replaying edit log: " + deltaTxId + "/" + numTxns + " transactions completed. (" + percent + "%)"); lastLogTime = now; } From 735046ebecd9e803398be56fbf79dbde5226b4c1 Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Sun, 26 Aug 2012 04:00:26 +0000 Subject: [PATCH 06/62] HDFS-3851. DFSOutputStream class code cleanup. Contributed by Jing Zhao. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1377372 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../apache/hadoop/hdfs/DFSOutputStream.java | 69 +++++++++---------- 2 files changed, 36 insertions(+), 35 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index b321357821..db82a6c6f5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -129,6 +129,8 @@ Trunk (unreleased changes) HDFS-3844. Add @Override and remove {@inheritdoc} and unnecessary imports. (Jing Zhao via suresh) + HDFS-3851. DFSOutputStream class code cleanup. 
(Jing Zhao via suresh) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index a04d8af4c3..8e51d7c049 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -56,8 +56,8 @@ import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.UnresolvedPathException; import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; -import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor; +import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException; import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; @@ -107,8 +107,8 @@ ****************************************************************/ @InterfaceAudience.Private public class DFSOutputStream extends FSOutputSummer implements Syncable { - private final DFSClient dfsClient; private static final int MAX_PACKETS = 80; // each packet 64K, total 5MB + private final DFSClient dfsClient; private Socket s; // closed is accessed by different threads under different locks. private volatile boolean closed = false; @@ -138,15 +138,15 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable { private final short blockReplication; // replication factor of file private boolean shouldSyncBlock = false; // force blocks to disk upon close - private class Packet { - long seqno; // sequencenumber of buffer in block - long offsetInBlock; // offset in block - private boolean lastPacketInBlock; // is this the last packet in block? - boolean syncBlock; // this packet forces the current block to disk - int numChunks; // number of chunks currently in packet - int maxChunks; // max chunks in packet - + private static class Packet { + private static final long HEART_BEAT_SEQNO = -1L; + long seqno; // sequencenumber of buffer in block + final long offsetInBlock; // offset in block + boolean syncBlock; // this packet forces the current block to disk + int numChunks; // number of chunks currently in packet + final int maxChunks; // max chunks in packet byte[] buf; + private boolean lastPacketInBlock; // is this the last packet in block? /** * buf is pointed into like follows: @@ -164,45 +164,36 @@ private class Packet { */ int checksumStart; int checksumPos; - int dataStart; + final int dataStart; int dataPos; - private static final long HEART_BEAT_SEQNO = -1L; - /** * Create a heartbeat packet. */ - Packet() { - this.lastPacketInBlock = false; - this.numChunks = 0; - this.offsetInBlock = 0; - this.seqno = HEART_BEAT_SEQNO; - - buf = new byte[PacketHeader.PKT_MAX_HEADER_LEN]; - - checksumStart = checksumPos = dataPos = dataStart = PacketHeader.PKT_MAX_HEADER_LEN; - maxChunks = 0; + Packet(int checksumSize) { + this(0, 0, 0, HEART_BEAT_SEQNO, checksumSize); } /** * Create a new packet. * - * @param pktSize maximum size of the packet, including checksum data and actual data. + * @param pktSize maximum size of the packet, + * including checksum data and actual data. * @param chunksPerPkt maximum number of chunks per packet. 
* @param offsetInBlock offset in bytes into the HDFS block. */ - Packet(int pktSize, int chunksPerPkt, long offsetInBlock) { + Packet(int pktSize, int chunksPerPkt, long offsetInBlock, + long seqno, int checksumSize) { this.lastPacketInBlock = false; this.numChunks = 0; this.offsetInBlock = offsetInBlock; - this.seqno = currentSeqno; - currentSeqno++; + this.seqno = seqno; buf = new byte[PacketHeader.PKT_MAX_HEADER_LEN + pktSize]; checksumStart = PacketHeader.PKT_MAX_HEADER_LEN; checksumPos = checksumStart; - dataStart = checksumStart + (chunksPerPkt * checksum.getChecksumSize()); + dataStart = checksumStart + (chunksPerPkt * checksumSize); dataPos = dataStart; maxChunks = chunksPerPkt; } @@ -412,6 +403,7 @@ public void run() { response.join(); response = null; } catch (InterruptedException e) { + DFSClient.LOG.warn("Caught exception ", e); } } @@ -439,6 +431,7 @@ public void run() { try { dataQueue.wait(timeout); } catch (InterruptedException e) { + DFSClient.LOG.warn("Caught exception ", e); } doSleep = false; now = Time.now(); @@ -448,7 +441,7 @@ public void run() { } // get packet to be sent. if (dataQueue.isEmpty()) { - one = new Packet(); // heartbeat packet + one = new Packet(checksum.getChecksumSize()); // heartbeat packet } else { one = dataQueue.getFirst(); // regular data packet } @@ -488,6 +481,7 @@ public void run() { // wait for acks to arrive from datanodes dataQueue.wait(1000); } catch (InterruptedException e) { + DFSClient.LOG.warn("Caught exception ", e); } } } @@ -518,7 +512,7 @@ public void run() { blockStream.flush(); } catch (IOException e) { // HDFS-3398 treat primary DN is down since client is unable to - // write to primary DN + // write to primary DN errorIndex = 0; throw e; } @@ -607,6 +601,7 @@ private void closeResponder() { response.close(); response.join(); } catch (InterruptedException e) { + DFSClient.LOG.warn("Caught exception ", e); } finally { response = null; } @@ -1178,6 +1173,7 @@ private LocatedBlock locateFollowingBlock(long start, Thread.sleep(sleeptime); sleeptime *= 2; } catch (InterruptedException ie) { + DFSClient.LOG.warn("Caught exception ", ie); } } } else { @@ -1421,7 +1417,7 @@ protected synchronized void writeChunk(byte[] b, int offset, int len, byte[] che if (currentPacket == null) { currentPacket = new Packet(packetSize, chunksPerPacket, - bytesCurBlock); + bytesCurBlock, currentSeqno++, this.checksum.getChecksumSize()); if (DFSClient.LOG.isDebugEnabled()) { DFSClient.LOG.debug("DFSClient writeChunk allocating new packet seqno=" + currentPacket.seqno + @@ -1468,7 +1464,8 @@ protected synchronized void writeChunk(byte[] b, int offset, int len, byte[] che // indicate the end of block and reset bytesCurBlock. // if (bytesCurBlock == blockSize) { - currentPacket = new Packet(0, 0, bytesCurBlock); + currentPacket = new Packet(0, 0, bytesCurBlock, + currentSeqno++, this.checksum.getChecksumSize()); currentPacket.lastPacketInBlock = true; currentPacket.syncBlock = shouldSyncBlock; waitAndQueueCurrentPacket(); @@ -1540,7 +1537,7 @@ private void flushOrSync(boolean isSync) throws IOException { // but sync was requested. // Send an empty packet currentPacket = new Packet(packetSize, chunksPerPacket, - bytesCurBlock); + bytesCurBlock, currentSeqno++, this.checksum.getChecksumSize()); } } else { // We already flushed up to this offset. @@ -1557,7 +1554,7 @@ private void flushOrSync(boolean isSync) throws IOException { // and sync was requested. // So send an empty sync packet. 
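(Two things worth noting in this hunk: the empty packet is sent only so the datanodes still receive the sync request for data that has already been flushed, and, as elsewhere in this cleanup, the packet's sequence number and checksum size are now passed into the constructor explicitly because Packet has become a static nested class with no access to the enclosing stream's fields.)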
currentPacket = new Packet(packetSize, chunksPerPacket, - bytesCurBlock); + bytesCurBlock, currentSeqno++, this.checksum.getChecksumSize()); } else { // just discard the current packet since it is already been sent. currentPacket = null; @@ -1738,7 +1735,8 @@ public synchronized void close() throws IOException { if (bytesCurBlock != 0) { // send an empty packet to mark the end of the block - currentPacket = new Packet(0, 0, bytesCurBlock); + currentPacket = new Packet(0, 0, bytesCurBlock, + currentSeqno++, this.checksum.getChecksumSize()); currentPacket.lastPacketInBlock = true; currentPacket.syncBlock = shouldSyncBlock; } @@ -1778,6 +1776,7 @@ private void completeFile(ExtendedBlock last) throws IOException { DFSClient.LOG.info("Could not complete file " + src + " retrying..."); } } catch (InterruptedException ie) { + DFSClient.LOG.warn("Caught exception ", ie); } } } From 10e704c50ba1fa601329d0fee099993e8c3725a6 Mon Sep 17 00:00:00 2001 From: Siddharth Seth Date: Mon, 27 Aug 2012 18:27:18 +0000 Subject: [PATCH 07/62] YARN-29. Add a yarn-client module. (Contributed by Vinod Kumar Vavilapalli) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1377781 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-project/pom.xml | 6 + hadoop-yarn-project/CHANGES.txt | 2 + .../pom.xml | 4 + .../applications/distributedshell/Client.java | 123 ++------- .../dev-support/findbugs-exclude.xml | 19 ++ .../hadoop-yarn/hadoop-yarn-client/pom.xml | 37 +++ .../org/hadoop/yarn/client/YarnClient.java | 234 ++++++++++++++++ .../hadoop/yarn/client/YarnClientImpl.java | 258 ++++++++++++++++++ .../hadoop/yarn/client/TestYarnClient.java | 30 ++ hadoop-yarn-project/hadoop-yarn/pom.xml | 6 +- 10 files changed, 613 insertions(+), 106 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/dev-support/findbugs-exclude.xml create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/hadoop/yarn/client/YarnClient.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/hadoop/yarn/client/YarnClientImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/hadoop/yarn/client/TestYarnClient.java diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 8398e50a69..f23adc7bd0 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -118,6 +118,12 @@ ${project.version} + + org.apache.hadoop + hadoop-yarn-client + ${project.version} + + org.apache.hadoop hadoop-mapreduce-client-core diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index ca1810ed50..734e0e1a7c 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -33,6 +33,8 @@ Release 2.1.0-alpha - Unreleased IMPROVEMENTS + YARN-29. Add a yarn-client module. (Vinod Kumar Vavilapalli via sseth) + BUG FIXES YARN-12. Fix findbugs warnings in FairScheduler. 
(Junping Du via acmurthy) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml index babaa34a83..d29edc1be2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml @@ -41,6 +41,10 @@ org.apache.hadoop hadoop-yarn-common + + org.apache.hadoop + hadoop-yarn-client + org.apache.hadoop hadoop-yarn-server-nodemanager diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java index 1d598d8310..c5e04a8c4d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java @@ -22,7 +22,6 @@ import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; -import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -44,20 +43,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ClientRMProtocol; -import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; -import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; -import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest; -import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse; -import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest; -import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse; -import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; -import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest; -import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse; -import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest; -import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; -import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; @@ -73,12 +60,12 @@ import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; -import org.apache.hadoop.yarn.ipc.YarnRPC; import 
org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.Records; - +import org.hadoop.yarn.client.YarnClientImpl; /** * Client for Distributed Shell application submission to YARN. @@ -113,19 +100,13 @@ */ @InterfaceAudience.Public @InterfaceStability.Unstable -public class Client { +public class Client extends YarnClientImpl { private static final Log LOG = LogFactory.getLog(Client.class); // Configuration private Configuration conf; - // RPC to communicate to RM - private YarnRPC rpc; - - // Handle to talk to the Resource Manager/Applications Manager - private ClientRMProtocol applicationsManager; - // Application master specific info to register a new Application with RM/ASM private String appName = ""; // App master priority @@ -196,9 +177,9 @@ public static void main(String[] args) { /** */ public Client(Configuration conf) throws Exception { - // Set up the configuration and RPC + super(); this.conf = conf; - rpc = YarnRPC.create(conf); + init(conf); } /** @@ -328,22 +309,17 @@ public boolean init(String[] args) throws ParseException { * @throws IOException */ public boolean run() throws IOException { - LOG.info("Starting Client"); - // Connect to ResourceManager - connectToASM(); - assert(applicationsManager != null); + LOG.info("Running Client"); + start(); - // Use ClientRMProtocol handle to general cluster information - GetClusterMetricsRequest clusterMetricsReq = Records.newRecord(GetClusterMetricsRequest.class); - GetClusterMetricsResponse clusterMetricsResp = applicationsManager.getClusterMetrics(clusterMetricsReq); + YarnClusterMetrics clusterMetrics = super.getYarnClusterMetrics(); LOG.info("Got Cluster metric info from ASM" - + ", numNodeManagers=" + clusterMetricsResp.getClusterMetrics().getNumNodeManagers()); + + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers()); - GetClusterNodesRequest clusterNodesReq = Records.newRecord(GetClusterNodesRequest.class); - GetClusterNodesResponse clusterNodesResp = applicationsManager.getClusterNodes(clusterNodesReq); + List clusterNodeReports = super.getNodeReports(); LOG.info("Got Cluster node info from ASM"); - for (NodeReport node : clusterNodesResp.getNodeReports()) { + for (NodeReport node : clusterNodeReports) { LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress" + node.getHttpAddress() @@ -352,10 +328,7 @@ public boolean run() throws IOException { + ", nodeHealthStatus" + node.getNodeHealthStatus()); } - GetQueueInfoRequest queueInfoReq = Records.newRecord(GetQueueInfoRequest.class); - queueInfoReq.setQueueName(this.amQueue); - GetQueueInfoResponse queueInfoResp = applicationsManager.getQueueInfo(queueInfoReq); - QueueInfo queueInfo = queueInfoResp.getQueueInfo(); + QueueInfo queueInfo = super.getQueueInfo(this.amQueue); LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity=" + queueInfo.getCurrentCapacity() @@ -363,9 +336,7 @@ public boolean run() throws IOException { + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount=" + queueInfo.getChildQueues().size()); - GetQueueUserAclsInfoRequest queueUserAclsReq = Records.newRecord(GetQueueUserAclsInfoRequest.class); - GetQueueUserAclsInfoResponse queueUserAclsResp = applicationsManager.getQueueUserAcls(queueUserAclsReq); - List listAclInfo = queueUserAclsResp.getUserAclsInfoList(); + List listAclInfo = super.getQueueAclsInfo(); for (QueueUserACLInfo aclInfo : listAclInfo) { for (QueueACL userAcl : aclInfo.getUserAcls()) { LOG.info("User ACL 
Info for Queue" @@ -375,7 +346,7 @@ public boolean run() throws IOException { } // Get a new application id - GetNewApplicationResponse newApp = getApplication(); + GetNewApplicationResponse newApp = super.getNewApplication(); ApplicationId appId = newApp.getApplicationId(); // TODO get min/max resource capabilities from RM and change memory ask if needed @@ -590,16 +561,12 @@ else if (amMemory > maxMem) { // Set the queue to which this application is to be submitted in the RM appContext.setQueue(amQueue); - // Create the request to send to the applications manager - SubmitApplicationRequest appRequest = Records.newRecord(SubmitApplicationRequest.class); - appRequest.setApplicationSubmissionContext(appContext); - // Submit the application to the applications manager // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest); // Ignore the response as either a valid response object is returned on success // or an exception thrown to denote some form of a failure LOG.info("Submitting application to ASM"); - applicationsManager.submitApplication(appRequest); + super.submitApplication(appContext); // TODO // Try submitting the same request again @@ -629,10 +596,7 @@ private boolean monitorApplication(ApplicationId appId) throws YarnRemoteExcepti } // Get application report for the appId we are interested in - GetApplicationReportRequest reportRequest = Records.newRecord(GetApplicationReportRequest.class); - reportRequest.setApplicationId(appId); - GetApplicationReportResponse reportResponse = applicationsManager.getApplicationReport(reportRequest); - ApplicationReport report = reportResponse.getApplicationReport(); + ApplicationReport report = super.getApplicationReport(appId); LOG.info("Got application report from ASM for" + ", appId=" + appId.getId() @@ -671,7 +635,7 @@ else if (YarnApplicationState.KILLED == state if (System.currentTimeMillis() > (clientStartTime + clientTimeout)) { LOG.info("Reached client specified timeout for application. Killing application"); - killApplication(appId); + forceKillApplication(appId); return false; } } @@ -683,61 +647,14 @@ else if (YarnApplicationState.KILLED == state * @param appId Application Id to be killed. * @throws YarnRemoteException */ - private void killApplication(ApplicationId appId) throws YarnRemoteException { - KillApplicationRequest request = Records.newRecord(KillApplicationRequest.class); + private void forceKillApplication(ApplicationId appId) throws YarnRemoteException { // TODO clarify whether multiple jobs with the same app id can be submitted and be running at // the same time. // If yes, can we kill a particular attempt only? 
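With the distributed-shell client now extending YarnClientImpl, the whole submit-and-monitor flow collapses into a handful of yarn-client calls. The sketch below is illustrative only and is not part of the patch: the class name, package, queue value and polling interval are invented for the example, and the AM container launch context and error handling are elided.

// A minimal sketch of submitting and monitoring an application through the
// new yarn-client API, mirroring the call sequence the rewritten Client uses.
package org.hadoop.yarn.client.examples; // hypothetical package, not in the patch

import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.Records;
import org.hadoop.yarn.client.YarnClientImpl;

public class SimpleYarnSubmitter {

  // Submits a (mostly empty) application and waits until it leaves the cluster.
  public static boolean submitAndWait(YarnConfiguration conf) throws Exception {
    YarnClientImpl client = new YarnClientImpl();
    client.init(conf);
    client.start();
    try {
      // Ask the RM for a new application id.
      GetNewApplicationResponse newApp = client.getNewApplication();
      ApplicationId appId = newApp.getApplicationId();

      // Describe the application; a real client would also build the
      // ContainerLaunchContext for the ApplicationMaster here.
      ApplicationSubmissionContext appContext =
          Records.newRecord(ApplicationSubmissionContext.class);
      appContext.setApplicationId(appId);
      appContext.setQueue("default");

      client.submitApplication(appContext);

      // Poll the application report until a terminal state is reached.
      ApplicationReport report = client.getApplicationReport(appId);
      while (report.getYarnApplicationState() != YarnApplicationState.FINISHED
          && report.getYarnApplicationState() != YarnApplicationState.FAILED
          && report.getYarnApplicationState() != YarnApplicationState.KILLED) {
        Thread.sleep(1000);
        report = client.getApplicationReport(appId);
      }
      return report.getYarnApplicationState() == YarnApplicationState.FINISHED;
    } finally {
      client.stop();
    }
  }
}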
- request.setApplicationId(appId); - // KillApplicationResponse response = applicationsManager.forceKillApplication(request); + // Response can be ignored as it is non-null on success or // throws an exception in case of failures - applicationsManager.forceKillApplication(request); - } - - /** - * Connect to the Resource Manager/Applications Manager - * @return Handle to communicate with the ASM - * @throws IOException - */ - private void connectToASM() throws IOException { - - /* - UserGroupInformation user = UserGroupInformation.getCurrentUser(); - applicationsManager = user.doAs(new PrivilegedAction() { - public ClientRMProtocol run() { - InetSocketAddress rmAddress = NetUtils.createSocketAddr(conf.get( - YarnConfiguration.RM_SCHEDULER_ADDRESS, - YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS)); - LOG.info("Connecting to ResourceManager at " + rmAddress); - Configuration appsManagerServerConf = new Configuration(conf); - appsManagerServerConf.setClass(YarnConfiguration.YARN_SECURITY_INFO, - ClientRMSecurityInfo.class, SecurityInfo.class); - ClientRMProtocol asm = ((ClientRMProtocol) rpc.getProxy(ClientRMProtocol.class, rmAddress, appsManagerServerConf)); - return asm; - } - }); - */ - YarnConfiguration yarnConf = new YarnConfiguration(conf); - InetSocketAddress rmAddress = yarnConf.getSocketAddr( - YarnConfiguration.RM_ADDRESS, - YarnConfiguration.DEFAULT_RM_ADDRESS, - YarnConfiguration.DEFAULT_RM_PORT); - LOG.info("Connecting to ResourceManager at " + rmAddress); - applicationsManager = ((ClientRMProtocol) rpc.getProxy( - ClientRMProtocol.class, rmAddress, conf)); - } - - /** - * Get a new application from the ASM - * @return New Application - * @throws YarnRemoteException - */ - private GetNewApplicationResponse getApplication() throws YarnRemoteException { - GetNewApplicationRequest request = Records.newRecord(GetNewApplicationRequest.class); - GetNewApplicationResponse response = applicationsManager.getNewApplication(request); - LOG.info("Got new application id=" + response.getApplicationId()); - return response; + super.killApplication(appId); } private static String getTestRuntimeClasspath() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/dev-support/findbugs-exclude.xml new file mode 100644 index 0000000000..0e037a2ad0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/dev-support/findbugs-exclude.xml @@ -0,0 +1,19 @@ + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml new file mode 100644 index 0000000000..2646f6d0dc --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml @@ -0,0 +1,37 @@ + + + + 4.0.0 + + hadoop-yarn + org.apache.hadoop + 3.0.0-SNAPSHOT + + org.apache.hadoop + hadoop-yarn-client + 3.0.0-SNAPSHOT + hadoop-yarn-client + + + + org.apache.hadoop + hadoop-yarn-api + + + org.apache.hadoop + hadoop-yarn-common + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/hadoop/yarn/client/YarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/hadoop/yarn/client/YarnClient.java new file mode 100644 index 0000000000..eca80c919f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/hadoop/yarn/client/YarnClient.java @@ -0,0 +1,234 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. 
See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.hadoop.yarn.client; + +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.DelegationToken; +import org.apache.hadoop.yarn.api.records.NodeReport; +import org.apache.hadoop.yarn.api.records.QueueInfo; +import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; +import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; +import org.apache.hadoop.yarn.exceptions.YarnRemoteException; +import org.apache.hadoop.yarn.service.Service; + +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface YarnClient extends Service { + + /** + *
+   * Obtain a new {@link ApplicationId} for submitting new applications.
+   * </p>
+   *
+   * <p>
+   * Returns a response which contains {@link ApplicationId} that can be used to
+   * submit a new application. See
+   * {@link #submitApplication(ApplicationSubmissionContext)}.
+   * </p>
+   *
+   * <p>
+   * See {@link GetNewApplicationResponse} for other information that is
+   * returned.
+   * </p>
+   *
+   * @return response containing the new ApplicationId to be used
+   *         to submit an application
+   * @throws YarnRemoteException
+   */
+  GetNewApplicationResponse getNewApplication() throws YarnRemoteException;
+
+  /**
+   * <p>
+   * Submit a new application to YARN.
+   * </p>
+   *
+   * @param appContext
+   *          {@link ApplicationSubmissionContext} containing all the details
+   *          needed to submit a new application
+   * @return {@link ApplicationId} of the accepted application
+   * @throws YarnRemoteException
+   * @see #getNewApplication()
+   */
+  ApplicationId submitApplication(ApplicationSubmissionContext appContext)
+      throws YarnRemoteException;
+
+  /**
+   * <p>
+   * Kill an application identified by given ID.
+   * </p>
+   *
+   * @param applicationId
+   *          {@link ApplicationId} of the application that needs to be killed
+   * @throws YarnRemoteException
+   *           in case of errors or if YARN rejects the request due to
+   *           access-control restrictions.
+   * @see #getQueueAclsInfo()
+   */
+  void killApplication(ApplicationId applicationId) throws YarnRemoteException;
+
+  /**
+   * <p>
+   * Get a report of the given Application.
+   * </p>
+   *
+   * <p>
+   * In secure mode, YARN verifies access to the application, queue
+   * etc. before accepting the request.
+   * </p>
+   *
+   * <p>
+   * If the user does not have VIEW_APP access then the following
+   * fields in the report will be set to stubbed values:
+   * <ul>
+   *   <li>host - set to "N/A"</li>
+   *   <li>RPC port - set to -1</li>
+   *   <li>client token - set to "N/A"</li>
+   *   <li>diagnostics - set to "N/A"</li>
+   *   <li>tracking URL - set to "N/A"</li>
+   *   <li>original tracking URL - set to "N/A"</li>
+   *   <li>resource usage report - all values are -1</li>
+   * </ul>
+   * </p>
+   *
+   * @param appId
+   *          {@link ApplicationId} of the application that needs a report
+   * @return application report
+   * @throws YarnRemoteException
+   */
+  ApplicationReport getApplicationReport(ApplicationId appId)
+      throws YarnRemoteException;
+
+  /**
+   * <p>
+   * Get a report (ApplicationReport) of all Applications in the cluster.
+   * </p>
+   *
+   * <p>
+   * If the user does not have VIEW_APP access for an application
+   * then the corresponding report will be filtered as described in
+   * {@link #getApplicationReport(ApplicationId)}.
+   * </p>
+   *
+   * @return a list of reports of all running applications
+   * @throws YarnRemoteException
+   */
+  List<ApplicationReport> getApplicationList() throws YarnRemoteException;
+
+  /**
+   * <p>
+   * Get metrics ({@link YarnClusterMetrics}) about the cluster.
+   * </p>
+   *
+   * @return cluster metrics
+   * @throws YarnRemoteException
+   */
+  YarnClusterMetrics getYarnClusterMetrics() throws YarnRemoteException;
+
+  /**
+   * <p>
+   * Get a report of all nodes ({@link NodeReport}) in the cluster.
+   * </p>
+   *
+   * @return A list of report of all nodes
+   * @throws YarnRemoteException
+   */
+  List<NodeReport> getNodeReports() throws YarnRemoteException;
+
+  /**
+   * <p>
+   * Get a delegation token so as to be able to talk to YARN using those tokens.
+   *
+   * @param renewer
+   *          Address of the renewer who can renew these tokens when needed by
+   *          securely talking to YARN.
+   * @return a delegation token ({@link DelegationToken}) that can be used to
+   *         talk to YARN
+   * @throws YarnRemoteException
+   */
+  DelegationToken getRMDelegationToken(Text renewer) throws YarnRemoteException;
+
+  /**
+   * <p>
+   * Get information ({@link QueueInfo}) about a given queue.
+   * </p>
+   *
+   * @param queueName
+   *          Name of the queue whose information is needed
+   * @return queue information
+   * @throws YarnRemoteException
+   *           in case of errors or if YARN rejects the request due to
+   *           access-control restrictions.
+   */
+  QueueInfo getQueueInfo(String queueName) throws YarnRemoteException;
+
+  /**
+   * <p>
+   * Get information ({@link QueueInfo}) about all queues, recursively if there
+   * is a hierarchy
+   * </p>
+   *
+   * @return a list of queue-information for all queues
+   * @throws YarnRemoteException
+   */
+  List<QueueInfo> getAllQueues() throws YarnRemoteException;
+
+  /**
+   * <p>
+   * Get information ({@link QueueInfo}) about top level queues.
+   * </p>
+   *
+   * @return a list of queue-information for all the top-level queues
+   * @throws YarnRemoteException
+   */
+  List<QueueInfo> getRootQueueInfos() throws YarnRemoteException;
+
+  /**
+   * <p>
+   * Get information ({@link QueueInfo}) about all the immediate children queues
+   * of the given queue
+   * </p>
+   *
+   * @param parent
+   *          Name of the queue whose child-queues' information is needed
+   * @return a list of queue-information for all queues who are direct children
+   *         of the given parent queue.
+   * @throws YarnRemoteException
+   */
+  List<QueueInfo> getChildQueueInfos(String parent) throws YarnRemoteException;
+
+  /**
+   * <p>
+   * Get information about acls for current user on all the
+   * existing queues.
+   * </p>
+ * + * @return a list of queue acls ({@link QueueUserACLInfo}) for + * current user + * @throws YarnRemoteException + */ + List getQueueAclsInfo() throws YarnRemoteException; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/hadoop/yarn/client/YarnClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/hadoop/yarn/client/YarnClientImpl.java new file mode 100644 index 0000000000..927a261e75 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/hadoop/yarn/client/YarnClientImpl.java @@ -0,0 +1,258 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.hadoop.yarn.client; + +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.yarn.api.ClientRMProtocol; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest; +import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.DelegationToken; +import org.apache.hadoop.yarn.api.records.NodeReport; +import org.apache.hadoop.yarn.api.records.QueueInfo; +import 
org.apache.hadoop.yarn.api.records.QueueUserACLInfo; +import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnRemoteException; +import org.apache.hadoop.yarn.ipc.YarnRPC; +import org.apache.hadoop.yarn.service.AbstractService; +import org.apache.hadoop.yarn.util.Records; + +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class YarnClientImpl extends AbstractService implements YarnClient { + + private static final Log LOG = LogFactory.getLog(YarnClientImpl.class); + + protected ClientRMProtocol rmClient; + protected InetSocketAddress rmAddress; + + private static final String ROOT = "root"; + + public YarnClientImpl() { + super(YarnClientImpl.class.getName()); + } + + private static InetSocketAddress getRmAddress(Configuration conf) { + return conf.getSocketAddr(YarnConfiguration.RM_ADDRESS, + YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT); + } + + @Override + public synchronized void init(Configuration conf) { + this.rmAddress = getRmAddress(conf); + super.init(conf); + } + + @Override + public synchronized void start() { + YarnRPC rpc = YarnRPC.create(getConfig()); + + this.rmClient = + (ClientRMProtocol) rpc.getProxy(ClientRMProtocol.class, rmAddress, + getConfig()); + LOG.debug("Connecting to ResourceManager at " + rmAddress); + super.start(); + } + + @Override + public synchronized void stop() { + RPC.stopProxy(this.rmClient); + super.stop(); + } + + @Override + public GetNewApplicationResponse getNewApplication() + throws YarnRemoteException { + GetNewApplicationRequest request = + Records.newRecord(GetNewApplicationRequest.class); + return rmClient.getNewApplication(request); + } + + @Override + public ApplicationId + submitApplication(ApplicationSubmissionContext appContext) + throws YarnRemoteException { + ApplicationId applicationId = appContext.getApplicationId(); + appContext.setApplicationId(applicationId); + SubmitApplicationRequest request = + Records.newRecord(SubmitApplicationRequest.class); + request.setApplicationSubmissionContext(appContext); + rmClient.submitApplication(request); + LOG.info("Submitted application " + applicationId + " to ResourceManager" + + " at " + rmAddress); + return applicationId; + } + + @Override + public void killApplication(ApplicationId applicationId) + throws YarnRemoteException { + LOG.info("Killing application " + applicationId); + KillApplicationRequest request = + Records.newRecord(KillApplicationRequest.class); + request.setApplicationId(applicationId); + rmClient.forceKillApplication(request); + } + + @Override + public ApplicationReport getApplicationReport(ApplicationId appId) + throws YarnRemoteException { + GetApplicationReportRequest request = + Records.newRecord(GetApplicationReportRequest.class); + request.setApplicationId(appId); + GetApplicationReportResponse response = + rmClient.getApplicationReport(request); + return response.getApplicationReport(); + } + + @Override + public List getApplicationList() + throws YarnRemoteException { + GetAllApplicationsRequest request = + Records.newRecord(GetAllApplicationsRequest.class); + GetAllApplicationsResponse response = rmClient.getAllApplications(request); + return response.getApplicationList(); + } + + @Override + public YarnClusterMetrics getYarnClusterMetrics() throws YarnRemoteException { + GetClusterMetricsRequest request = + Records.newRecord(GetClusterMetricsRequest.class); + GetClusterMetricsResponse response = 
rmClient.getClusterMetrics(request); + return response.getClusterMetrics(); + } + + @Override + public List getNodeReports() throws YarnRemoteException { + GetClusterNodesRequest request = + Records.newRecord(GetClusterNodesRequest.class); + GetClusterNodesResponse response = rmClient.getClusterNodes(request); + return response.getNodeReports(); + } + + @Override + public DelegationToken getRMDelegationToken(Text renewer) + throws YarnRemoteException { + /* get the token from RM */ + GetDelegationTokenRequest rmDTRequest = + Records.newRecord(GetDelegationTokenRequest.class); + rmDTRequest.setRenewer(renewer.toString()); + GetDelegationTokenResponse response = + rmClient.getDelegationToken(rmDTRequest); + return response.getRMDelegationToken(); + } + + private GetQueueInfoRequest + getQueueInfoRequest(String queueName, boolean includeApplications, + boolean includeChildQueues, boolean recursive) { + GetQueueInfoRequest request = Records.newRecord(GetQueueInfoRequest.class); + request.setQueueName(queueName); + request.setIncludeApplications(includeApplications); + request.setIncludeChildQueues(includeChildQueues); + request.setRecursive(recursive); + return request; + } + + @Override + public QueueInfo getQueueInfo(String queueName) throws YarnRemoteException { + GetQueueInfoRequest request = + getQueueInfoRequest(queueName, true, false, false); + Records.newRecord(GetQueueInfoRequest.class); + return rmClient.getQueueInfo(request).getQueueInfo(); + } + + @Override + public List getQueueAclsInfo() throws YarnRemoteException { + GetQueueUserAclsInfoRequest request = + Records.newRecord(GetQueueUserAclsInfoRequest.class); + return rmClient.getQueueUserAcls(request).getUserAclsInfoList(); + } + + @Override + public List getAllQueues() throws YarnRemoteException { + List queues = new ArrayList(); + + QueueInfo rootQueue = + rmClient.getQueueInfo(getQueueInfoRequest(ROOT, false, true, true)) + .getQueueInfo(); + getChildQueues(rootQueue, queues, true); + return queues; + } + + @Override + public List getRootQueueInfos() throws YarnRemoteException { + List queues = new ArrayList(); + + QueueInfo rootQueue = + rmClient.getQueueInfo(getQueueInfoRequest(ROOT, false, true, true)) + .getQueueInfo(); + getChildQueues(rootQueue, queues, false); + return queues; + } + + @Override + public List getChildQueueInfos(String parent) + throws YarnRemoteException { + List queues = new ArrayList(); + + QueueInfo parentQueue = + rmClient.getQueueInfo(getQueueInfoRequest(parent, false, true, false)) + .getQueueInfo(); + getChildQueues(parentQueue, queues, true); + return queues; + } + + private void getChildQueues(QueueInfo parent, List queues, + boolean recursive) { + List childQueues = parent.getChildQueues(); + + for (QueueInfo child : childQueues) { + queues.add(child); + if (recursive) { + getChildQueues(child, queues, recursive); + } + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/hadoop/yarn/client/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/hadoop/yarn/client/TestYarnClient.java new file mode 100644 index 0000000000..58737da1f7 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/hadoop/yarn/client/TestYarnClient.java @@ -0,0 +1,30 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. 
The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.hadoop.yarn.client; + +import org.junit.Test; + +public class TestYarnClient { + + @Test + public void test() { + // More to come later. + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml index d6db3815b0..718c5ffea8 100644 --- a/hadoop-yarn-project/hadoop-yarn/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/pom.xml @@ -11,8 +11,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. ---> - +--> 4.0.0 org.apache.hadoop @@ -186,5 +185,6 @@ hadoop-yarn-server hadoop-yarn-applications hadoop-yarn-site + hadoop-yarn-client - + \ No newline at end of file From 0ec6cda4b7b22099e6b5d1fef54f9fa80c94a5a9 Mon Sep 17 00:00:00 2001 From: Siddharth Seth Date: Mon, 27 Aug 2012 19:35:45 +0000 Subject: [PATCH 08/62] YARN-37. Change TestRMAppTransitions to use the DrainDispatcher. (Contributed by Mayank Bansal) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1377803 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + .../rmapp/TestRMAppTransitions.java | 85 +++++++++++++++---- 2 files changed, 70 insertions(+), 18 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 734e0e1a7c..4b5c2ca0dc 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -42,6 +42,9 @@ Release 2.1.0-alpha - Unreleased YARN-22. Fix ContainerLogs to work if the log-dir is specified as a URI. (Mayank Bansal via sseth) + YARN-37. Change TestRMAppTransitions to use the DrainDispatcher. 
+ (Mayank Bansal via sseth) + Release 0.23.3 - Unreleased INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java index c8182596db..0f6093dec2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java @@ -33,14 +33,15 @@ import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.event.AsyncDispatcher; +import org.apache.hadoop.yarn.event.DrainDispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService; +import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEventType; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore; import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore; -import org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.InlineDispatcher; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent; @@ -48,6 +49,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType; import org.apache.hadoop.yarn.server.resourcemanager.security.ApplicationTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.junit.Before; @@ -60,7 +63,7 @@ public class TestRMAppTransitions { private RMContext rmContext; private static int maxRetries = 4; private static int appId = 1; -// private AsyncDispatcher rmDispatcher; + private DrainDispatcher rmDispatcher; // ignore all the RM application attempt events private static final class TestApplicationAttemptEventDispatcher implements @@ -110,12 +113,27 @@ public void handle(RMAppEvent event) { } } + // handle all the RM application manager events - same as in + // ResourceManager.java + private static final class TestApplicationManagerEventDispatcher implements + EventHandler { + @Override + public void handle(RMAppManagerEvent event) { + } + } + + // handle all the scheduler events - same as in ResourceManager.java + private static final class 
TestSchedulerEventDispatcher implements + EventHandler { + @Override + public void handle(SchedulerEvent event) { + } + } + @Before public void setUp() throws Exception { - AsyncDispatcher rmDispatcher = new AsyncDispatcher(); Configuration conf = new Configuration(); - rmDispatcher = new InlineDispatcher(); - + rmDispatcher = new DrainDispatcher(); ContainerAllocationExpirer containerAllocationExpirer = mock(ContainerAllocationExpirer.class); AMLivelinessMonitor amLivelinessMonitor = mock(AMLivelinessMonitor.class); @@ -131,6 +149,13 @@ null, new ApplicationTokenSecretManager(conf), rmDispatcher.register(RMAppEventType.class, new TestApplicationEventDispatcher(rmContext)); + + rmDispatcher.register(RMAppManagerEventType.class, + new TestApplicationManagerEventDispatcher()); + + rmDispatcher.register(SchedulerEventType.class, + new TestSchedulerEventDispatcher()); + rmDispatcher.init(conf); rmDispatcher.start(); } @@ -225,9 +250,8 @@ private static void assertKilled(RMApp application) { "Application killed by user.", diag.toString()); } - private static void assertAppAndAttemptKilled(RMApp application) { + private static void assertAppAndAttemptKilled(RMApp application) throws InterruptedException { assertKilled(application); - /* also check if the attempt is killed */ Assert.assertEquals( RMAppAttemptState.KILLED, application.getCurrentAppAttempt().getAppAttemptState() ); @@ -332,6 +356,7 @@ public void testUnmanagedApp() throws IOException { RMAppEvent event = new RMAppFailedAttemptEvent( application.getApplicationId(), RMAppEventType.ATTEMPT_FAILED, ""); application.handle(event); + rmDispatcher.await(); RMAppAttempt appAttempt = application.getCurrentAppAttempt(); Assert.assertEquals(1, appAttempt.getAppAttemptId().getAttemptId()); assertFailed(application, @@ -353,6 +378,7 @@ public void testAppNewKill() throws IOException { RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL); application.handle(event); + rmDispatcher.await(); assertKilled(application); } @@ -366,6 +392,7 @@ public void testAppNewReject() throws IOException { RMAppEvent event = new RMAppRejectedEvent(application.getApplicationId(), rejectedText); application.handle(event); + rmDispatcher.await(); assertFailed(application, rejectedText); } @@ -379,18 +406,22 @@ public void testAppSubmittedRejected() throws IOException { RMAppEvent event = new RMAppRejectedEvent(application.getApplicationId(), rejectedText); application.handle(event); + rmDispatcher.await(); assertFailed(application, rejectedText); } @Test - public void testAppSubmittedKill() throws IOException { + public void testAppSubmittedKill() throws IOException, InterruptedException { LOG.info("--- START: testAppSubmittedKill---"); - - RMApp application = testCreateAppAccepted(null); - // SUBMITTED => KILLED event RMAppEventType.KILL - RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL); - this.rmContext.getRMApps().putIfAbsent(application.getApplicationId(), application); + RMApp application = testCreateAppSubmitted(null); + // SUBMITTED => KILLED event RMAppEventType.KILL + RMAppEvent event = new RMAppEvent(application.getApplicationId(), + RMAppEventType.KILL); + this.rmContext.getRMApps().putIfAbsent(application.getApplicationId(), + application); application.handle(event); + rmDispatcher.await(); + assertKilled(application); assertAppAndAttemptKilled(application); } @@ -410,6 +441,7 @@ public void testAppAcceptedFailed() throws IOException { new RMAppEvent(application.getApplicationId(), 
RMAppEventType.APP_ACCEPTED); application.handle(event); + rmDispatcher.await(); assertAppState(RMAppState.ACCEPTED, application); } @@ -420,19 +452,23 @@ public void testAppAcceptedFailed() throws IOException { new RMAppFailedAttemptEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_FAILED, message); application.handle(event); + rmDispatcher.await(); assertFailed(application, ".*" + message + ".*Failing the application.*"); } @Test - public void testAppAcceptedKill() throws IOException { + public void testAppAcceptedKill() throws IOException, InterruptedException { LOG.info("--- START: testAppAcceptedKill ---"); - RMApp application = testCreateAppAccepted(null); // ACCEPTED => KILLED event RMAppEventType.KILL - RMAppEvent event = - new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL); + RMAppEvent event = new RMAppEvent(application.getApplicationId(), + RMAppEventType.KILL); + this.rmContext.getRMApps().putIfAbsent(application.getApplicationId(), + application); application.handle(event); + rmDispatcher.await(); assertKilled(application); + assertAppAndAttemptKilled(application); } @Test @@ -444,6 +480,7 @@ public void testAppRunningKill() throws IOException { RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL); application.handle(event); + rmDispatcher.await(); assertKilled(application); } @@ -462,6 +499,7 @@ public void testAppRunningFailed() throws IOException { new RMAppFailedAttemptEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_FAILED, ""); application.handle(event); + rmDispatcher.await(); assertAppState(RMAppState.SUBMITTED, application); appAttempt = application.getCurrentAppAttempt(); Assert.assertEquals(++expectedAttemptId, @@ -470,11 +508,13 @@ public void testAppRunningFailed() throws IOException { new RMAppEvent(application.getApplicationId(), RMAppEventType.APP_ACCEPTED); application.handle(event); + rmDispatcher.await(); assertAppState(RMAppState.ACCEPTED, application); event = new RMAppEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_REGISTERED); application.handle(event); + rmDispatcher.await(); assertAppState(RMAppState.RUNNING, application); } @@ -484,11 +524,13 @@ public void testAppRunningFailed() throws IOException { new RMAppFailedAttemptEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_FAILED, ""); application.handle(event); + rmDispatcher.await(); assertFailed(application, ".*Failing the application.*"); // FAILED => FAILED event RMAppEventType.KILL event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL); application.handle(event); + rmDispatcher.await(); assertFailed(application, ".*Failing the application.*"); } @@ -501,6 +543,7 @@ public void testAppFinishingKill() throws IOException { RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL); application.handle(event); + rmDispatcher.await(); assertAppState(RMAppState.FINISHED, application); } @@ -513,6 +556,7 @@ public void testAppFinishedFinished() throws IOException { RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL); application.handle(event); + rmDispatcher.await(); assertTimesAtFinish(application); assertAppState(RMAppState.FINISHED, application); StringBuilder diag = application.getDiagnostics(); @@ -530,6 +574,7 @@ public void testAppKilledKilled() throws IOException { RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL); application.handle(event); + rmDispatcher.await(); 
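+      // DrainDispatcher.await() returns only once every event queued so far has
+      // been dispatched, so the assertions that follow observe the application
+      // state after the KILL event has been fully processed. The InlineDispatcher
+      // this patch replaces handled events synchronously and needed no such barrier.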
assertTimesAtFinish(application); assertAppState(RMAppState.KILLED, application); @@ -538,6 +583,7 @@ public void testAppKilledKilled() throws IOException { new RMAppEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_FINISHED); application.handle(event); + rmDispatcher.await(); assertTimesAtFinish(application); assertAppState(RMAppState.KILLED, application); @@ -546,6 +592,7 @@ public void testAppKilledKilled() throws IOException { new RMAppFailedAttemptEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_FAILED, ""); application.handle(event); + rmDispatcher.await(); assertTimesAtFinish(application); assertAppState(RMAppState.KILLED, application); @@ -554,12 +601,14 @@ public void testAppKilledKilled() throws IOException { new RMAppEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_KILLED); application.handle(event); + rmDispatcher.await(); assertTimesAtFinish(application); assertAppState(RMAppState.KILLED, application); // KILLED => KILLED event RMAppEventType.KILL event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL); application.handle(event); + rmDispatcher.await(); assertTimesAtFinish(application); assertAppState(RMAppState.KILLED, application); } From cb540cda694d2f380490966f087331d2004cb3bb Mon Sep 17 00:00:00 2001 From: Harsh J Date: Mon, 27 Aug 2012 20:16:16 +0000 Subject: [PATCH 09/62] HADOOP-8719. Workaround for kerberos-related log errors upon running any hadoop command on OSX. Contributed by Jianbin Wei. (harsh) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1377821 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../hadoop-common/src/main/conf/hadoop-env.sh | 8 ++++++++ hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh | 9 +++++++++ 3 files changed, 20 insertions(+) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 89a297ce48..2482aa7156 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -95,6 +95,9 @@ Trunk (unreleased changes) the message is printed and the stack trace is not printed to avoid chatter. (Brandon Li via Suresh) + HADOOP-8719. Workaround for kerberos-related log errors upon running any + hadoop command on OSX. (Jianbin Wei via harsh) + BUG FIXES HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName. diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh index 72e8c63f66..42a0d05aaa 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh +++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh @@ -47,6 +47,14 @@ done # Extra Java runtime options. Empty by default. 
export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS" +MAC_OSX=false +case "`uname`" in +Darwin*) MAC_OSX=true;; +esac +if $MAC_OSX; then + export HADOOP_OPTS="$HADOOP_OPTS -Djava.security.krb5.realm= -Djava.security.krb5.kdc=" +fi + # Command specific options appended to HADOOP_OPTS when specified export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS" export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS" diff --git a/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh b/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh index cfcb250b8e..2ccb38e3f6 100644 --- a/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh +++ b/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh @@ -61,6 +61,15 @@ fi # restore ordinary behaviour unset IFS +MAC_OSX=false +case "`uname`" in +Darwin*) MAC_OSX=true;; +esac + +if $MAC_OSX; then + YARN_OPTS="$YARN_OPTS -Djava.security.krb5.realm= -Djava.security.krb5.kdc=" +fi + YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR" YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR" From 24e47ebc18a62f6de351ec1ab8b9816999cc3267 Mon Sep 17 00:00:00 2001 From: Siddharth Seth Date: Tue, 28 Aug 2012 00:40:02 +0000 Subject: [PATCH 10/62] MAPREDUCE-4580. Change MapReduce to use the yarn-client module. (Contributed by Vinod Kumar Vavilapalli) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1377922 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 + .../hadoop-mapreduce-client-common/pom.xml | 4 + .../hadoop/mapred/ResourceMgrDelegate.java | 212 ++---------------- .../org/apache/hadoop/mapred/YARNRunner.java | 2 +- .../mapred/TestResourceMgrDelegate.java | 20 +- .../TestYarnClientProtocolProvider.java | 15 +- .../hadoop/mapreduce/v2/TestYARNRunner.java | 9 +- 7 files changed, 64 insertions(+), 201 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 37c9591995..882f50f22c 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -234,6 +234,9 @@ Release 2.1.0-alpha - Unreleased MAPREDUCE-3289. Make use of fadvise in the NM's shuffle handler. (Todd Lipcon and Siddharth Seth via sseth) + MAPREDUCE-4580. Change MapReduce to use the yarn-client module. + (Vinod Kumar Vavilapalli via sseth) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml index 1e9a509ff5..6b22094bcb 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml @@ -37,6 +37,10 @@ org.apache.hadoop hadoop-yarn-common
+ + org.apache.hadoop + hadoop-yarn-client + org.apache.hadoop hadoop-mapreduce-client-core diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java index 62b608aca4..5cf2a1d480 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java @@ -19,9 +19,6 @@ package org.apache.hadoop.mapred; import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -41,75 +38,29 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.yarn.api.ClientRMProtocol; -import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest; -import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse; -import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; -import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; -import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest; -import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse; -import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest; -import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse; -import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest; -import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest; -import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest; -import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; -import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.api.records.ApplicationReport; -import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; -import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; -import org.apache.hadoop.yarn.api.records.DelegationToken; import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.exceptions.YarnRemoteException; -import org.apache.hadoop.yarn.factories.RecordFactory; -import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; -import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.util.ProtoUtils; +import org.hadoop.yarn.client.YarnClientImpl; - -// TODO: This should be part of something like yarn-client. 
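The removed TODO above is exactly what this patch resolves: ResourceMgrDelegate becomes a YarnClientImpl, so the MapReduce client stack (JobClient, YARNRunner, ResourceMgrDelegate) rides on the shared yarn-client module instead of hand-rolled ClientRMProtocol plumbing. A caller-side sketch of what that buys, with an invented wrapper method and no error handling; it is not part of the patch:

// Sketch only; the wrapper method name is hypothetical.
static void queryCluster() throws IOException, InterruptedException {
  // The constructor now runs init()/start() on the inherited yarn-client,
  // so the caller never manages an RPC proxy directly.
  ResourceMgrDelegate delegate = new ResourceMgrDelegate(new YarnConfiguration());
  JobID jobId = delegate.getNewJobID();                   // wraps getNewApplication()
  TaskTrackerInfo[] nodes = delegate.getActiveTrackers(); // wraps getNodeReports()
  ClusterMetrics metrics = delegate.getClusterMetrics();  // wraps getYarnClusterMetrics()
  delegate.stop();                                        // AbstractService shutdown
}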
-public class ResourceMgrDelegate { +public class ResourceMgrDelegate extends YarnClientImpl { private static final Log LOG = LogFactory.getLog(ResourceMgrDelegate.class); - private final InetSocketAddress rmAddress; private YarnConfiguration conf; - ClientRMProtocol applicationsManager; + private GetNewApplicationResponse application; private ApplicationId applicationId; - private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); /** * Delegate responsible for communicating with the Resource Manager's {@link ClientRMProtocol}. * @param conf the configuration object. */ public ResourceMgrDelegate(YarnConfiguration conf) { + super(); this.conf = conf; - YarnRPC rpc = YarnRPC.create(this.conf); - this.rmAddress = getRmAddress(conf); - LOG.debug("Connecting to ResourceManager at " + rmAddress); - applicationsManager = - (ClientRMProtocol) rpc.getProxy(ClientRMProtocol.class, - rmAddress, this.conf); - LOG.debug("Connected to ResourceManager at " + rmAddress); - } - - /** - * Used for injecting applicationsManager, mostly for testing. - * @param conf the configuration object - * @param applicationsManager the handle to talk the resource managers - * {@link ClientRMProtocol}. - */ - public ResourceMgrDelegate(YarnConfiguration conf, - ClientRMProtocol applicationsManager) { - this.conf = conf; - this.applicationsManager = applicationsManager; - this.rmAddress = getRmAddress(conf); - } - - private static InetSocketAddress getRmAddress(YarnConfiguration conf) { - return conf.getSocketAddr(YarnConfiguration.RM_ADDRESS, - YarnConfiguration.DEFAULT_RM_ADDRESS, - YarnConfiguration.DEFAULT_RM_PORT); + init(conf); + start(); } public void cancelDelegationToken(Token arg0) @@ -117,26 +68,15 @@ public void cancelDelegationToken(Token arg0) return; } - public TaskTrackerInfo[] getActiveTrackers() throws IOException, InterruptedException { - GetClusterNodesRequest request = - recordFactory.newRecordInstance(GetClusterNodesRequest.class); - GetClusterNodesResponse response = - applicationsManager.getClusterNodes(request); - return TypeConverter.fromYarnNodes(response.getNodeReports()); + return TypeConverter.fromYarnNodes(super.getNodeReports()); } - public JobStatus[] getAllJobs() throws IOException, InterruptedException { - GetAllApplicationsRequest request = - recordFactory.newRecordInstance(GetAllApplicationsRequest.class); - GetAllApplicationsResponse response = - applicationsManager.getAllApplications(request); - return TypeConverter.fromYarnApps(response.getApplicationList(), this.conf); + return TypeConverter.fromYarnApps(super.getApplicationList(), this.conf); } - public TaskTrackerInfo[] getBlacklistedTrackers() throws IOException, InterruptedException { // TODO: Implement getBlacklistedTrackers @@ -144,128 +84,56 @@ public TaskTrackerInfo[] getBlacklistedTrackers() throws IOException, return new TaskTrackerInfo[0]; } - public ClusterMetrics getClusterMetrics() throws IOException, InterruptedException { - GetClusterMetricsRequest request = recordFactory.newRecordInstance(GetClusterMetricsRequest.class); - GetClusterMetricsResponse response = applicationsManager.getClusterMetrics(request); - YarnClusterMetrics metrics = response.getClusterMetrics(); + YarnClusterMetrics metrics = super.getYarnClusterMetrics(); ClusterMetrics oldMetrics = new ClusterMetrics(1, 1, 1, 1, 1, 1, metrics.getNumNodeManagers() * 10, metrics.getNumNodeManagers() * 2, 1, metrics.getNumNodeManagers(), 0, 0); return oldMetrics; } - @SuppressWarnings("rawtypes") - public Token 
getDelegationToken(Text renewer) - throws IOException, InterruptedException { - /* get the token from RM */ - org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest - rmDTRequest = recordFactory.newRecordInstance( - org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest.class); - rmDTRequest.setRenewer(renewer.toString()); - org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse - response = applicationsManager.getDelegationToken(rmDTRequest); - DelegationToken yarnToken = response.getRMDelegationToken(); - return ProtoUtils.convertFromProtoFormat(yarnToken, rmAddress); + public Token getDelegationToken(Text renewer) throws IOException, + InterruptedException { + return ProtoUtils.convertFromProtoFormat( + super.getRMDelegationToken(renewer), rmAddress); } - public String getFilesystemName() throws IOException, InterruptedException { return FileSystem.get(conf).getUri().toString(); } public JobID getNewJobID() throws IOException, InterruptedException { - GetNewApplicationRequest request = recordFactory.newRecordInstance(GetNewApplicationRequest.class); - applicationId = applicationsManager.getNewApplication(request).getApplicationId(); + this.application = super.getNewApplication(); + this.applicationId = this.application.getApplicationId(); return TypeConverter.fromYarn(applicationId); } - private static final String ROOT = "root"; - - private GetQueueInfoRequest getQueueInfoRequest(String queueName, - boolean includeApplications, boolean includeChildQueues, boolean recursive) { - GetQueueInfoRequest request = - recordFactory.newRecordInstance(GetQueueInfoRequest.class); - request.setQueueName(queueName); - request.setIncludeApplications(includeApplications); - request.setIncludeChildQueues(includeChildQueues); - request.setRecursive(recursive); - return request; - - } - public QueueInfo getQueue(String queueName) throws IOException, InterruptedException { - GetQueueInfoRequest request = - getQueueInfoRequest(queueName, true, false, false); - recordFactory.newRecordInstance(GetQueueInfoRequest.class); return TypeConverter.fromYarn( - applicationsManager.getQueueInfo(request).getQueueInfo(), this.conf); + super.getQueueInfo(queueName), this.conf); } - - private void getChildQueues(org.apache.hadoop.yarn.api.records.QueueInfo parent, - List queues, - boolean recursive) { - List childQueues = - parent.getChildQueues(); - - for (org.apache.hadoop.yarn.api.records.QueueInfo child : childQueues) { - queues.add(child); - if(recursive) { - getChildQueues(child, queues, recursive); - } - } - } - public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException, InterruptedException { - GetQueueUserAclsInfoRequest request = - recordFactory.newRecordInstance(GetQueueUserAclsInfoRequest.class); - List userAcls = - applicationsManager.getQueueUserAcls(request).getUserAclsInfoList(); - return TypeConverter.fromYarnQueueUserAclsInfo(userAcls); + return TypeConverter.fromYarnQueueUserAclsInfo(super + .getQueueAclsInfo()); } - public QueueInfo[] getQueues() throws IOException, InterruptedException { - List queues = - new ArrayList(); - - org.apache.hadoop.yarn.api.records.QueueInfo rootQueue = - applicationsManager.getQueueInfo( - getQueueInfoRequest(ROOT, false, true, true)).getQueueInfo(); - getChildQueues(rootQueue, queues, true); - - return TypeConverter.fromYarnQueueInfo(queues, this.conf); + return TypeConverter.fromYarnQueueInfo(super.getAllQueues(), this.conf); } - public QueueInfo[] getRootQueues() throws IOException, InterruptedException { - List 
queues = - new ArrayList(); - - org.apache.hadoop.yarn.api.records.QueueInfo rootQueue = - applicationsManager.getQueueInfo( - getQueueInfoRequest(ROOT, false, true, true)).getQueueInfo(); - getChildQueues(rootQueue, queues, false); - - return TypeConverter.fromYarnQueueInfo(queues, this.conf); + return TypeConverter.fromYarnQueueInfo(super.getRootQueueInfos(), this.conf); } public QueueInfo[] getChildQueues(String parent) throws IOException, InterruptedException { - List queues = - new ArrayList(); - - org.apache.hadoop.yarn.api.records.QueueInfo parentQueue = - applicationsManager.getQueueInfo( - getQueueInfoRequest(parent, false, true, false)).getQueueInfo(); - getChildQueues(parentQueue, queues, true); - - return TypeConverter.fromYarnQueueInfo(queues, this.conf); + return TypeConverter.fromYarnQueueInfo(super.getChildQueueInfos(parent), + this.conf); } public String getStagingAreaDir() throws IOException, InterruptedException { @@ -307,40 +175,6 @@ public long renewDelegationToken(Token arg0) return 0; } - - public ApplicationId submitApplication( - ApplicationSubmissionContext appContext) - throws IOException { - appContext.setApplicationId(applicationId); - SubmitApplicationRequest request = - recordFactory.newRecordInstance(SubmitApplicationRequest.class); - request.setApplicationSubmissionContext(appContext); - applicationsManager.submitApplication(request); - LOG.info("Submitted application " + applicationId + " to ResourceManager" + - " at " + rmAddress); - return applicationId; - } - - public void killApplication(ApplicationId applicationId) throws IOException { - KillApplicationRequest request = - recordFactory.newRecordInstance(KillApplicationRequest.class); - request.setApplicationId(applicationId); - applicationsManager.forceKillApplication(request); - LOG.info("Killing application " + applicationId); - } - - - public ApplicationReport getApplicationReport(ApplicationId appId) - throws YarnRemoteException { - GetApplicationReportRequest request = recordFactory - .newRecordInstance(GetApplicationReportRequest.class); - request.setApplicationId(appId); - GetApplicationReportResponse response = applicationsManager - .getApplicationReport(request); - ApplicationReport applicationReport = response.getApplicationReport(); - return applicationReport; - } - public ApplicationId getApplicationId() { return applicationId; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java index 74ae6446cd..f3271768ae 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java @@ -89,7 +89,7 @@ /** * This class enables the current JobClient (0.22 hadoop) to run on YARN. 
*/ -@SuppressWarnings({ "rawtypes", "unchecked", "deprecation" }) +@SuppressWarnings({ "rawtypes", "unchecked" }) public class YARNRunner implements ClientProtocol { private static final Log LOG = LogFactory.getLog(YARNRunner.class); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java index ad1ebc9d80..cd325a1d60 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java @@ -50,7 +50,7 @@ public class TestResourceMgrDelegate { */ @Test public void testGetRootQueues() throws IOException, InterruptedException { - ClientRMProtocol applicationsManager = Mockito.mock(ClientRMProtocol.class); + final ClientRMProtocol applicationsManager = Mockito.mock(ClientRMProtocol.class); GetQueueInfoResponse response = Mockito.mock(GetQueueInfoResponse.class); org.apache.hadoop.yarn.api.records.QueueInfo queueInfo = Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class); @@ -59,12 +59,17 @@ public void testGetRootQueues() throws IOException, InterruptedException { GetQueueInfoRequest.class))).thenReturn(response); ResourceMgrDelegate delegate = new ResourceMgrDelegate( - new YarnConfiguration(), applicationsManager); + new YarnConfiguration()) { + @Override + public synchronized void start() { + this.rmClient = applicationsManager; + } + }; delegate.getRootQueues(); ArgumentCaptor argument = ArgumentCaptor.forClass(GetQueueInfoRequest.class); - Mockito.verify(delegate.applicationsManager).getQueueInfo( + Mockito.verify(applicationsManager).getQueueInfo( argument.capture()); Assert.assertTrue("Children of root queue not requested", @@ -75,7 +80,7 @@ public void testGetRootQueues() throws IOException, InterruptedException { @Test public void tesAllJobs() throws Exception { - ClientRMProtocol applicationsManager = Mockito.mock(ClientRMProtocol.class); + final ClientRMProtocol applicationsManager = Mockito.mock(ClientRMProtocol.class); GetAllApplicationsResponse allApplicationsResponse = Records .newRecord(GetAllApplicationsResponse.class); List applications = new ArrayList(); @@ -93,7 +98,12 @@ public void tesAllJobs() throws Exception { .any(GetAllApplicationsRequest.class))).thenReturn( allApplicationsResponse); ResourceMgrDelegate resourceMgrDelegate = new ResourceMgrDelegate( - new YarnConfiguration(), applicationsManager); + new YarnConfiguration()) { + @Override + public synchronized void start() { + this.rmClient = applicationsManager; + } + }; JobStatus[] allJobs = resourceMgrDelegate.getAllJobs(); Assert.assertEquals(State.FAILED, allJobs[0].getState()); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestYarnClientProtocolProvider.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestYarnClientProtocolProvider.java index 7052c76902..1bbffb8fde 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestYarnClientProtocolProvider.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestYarnClientProtocolProvider.java @@ -18,9 +18,10 @@ package org.apache.hadoop.mapreduce; +import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.any; import static org.mockito.Mockito.when; + import java.io.IOException; import java.nio.ByteBuffer; @@ -104,14 +105,20 @@ public void testClusterGetDelegationToken() throws Exception { rmDTToken.setPassword(ByteBuffer.wrap("testcluster".getBytes())); rmDTToken.setService("0.0.0.0:8032"); getDTResponse.setRMDelegationToken(rmDTToken); - ClientRMProtocol cRMProtocol = mock(ClientRMProtocol.class); + final ClientRMProtocol cRMProtocol = mock(ClientRMProtocol.class); when(cRMProtocol.getDelegationToken(any( GetDelegationTokenRequest.class))).thenReturn(getDTResponse); ResourceMgrDelegate rmgrDelegate = new ResourceMgrDelegate( - new YarnConfiguration(conf), cRMProtocol); + new YarnConfiguration(conf)) { + @Override + public synchronized void start() { + this.rmClient = cRMProtocol; + } + }; yrunner.setResourceMgrDelegate(rmgrDelegate); Token t = cluster.getDelegationToken(new Text(" ")); - assertTrue("Testclusterkind".equals(t.getKind().toString())); + assertTrue("Token kind is instead " + t.getKind().toString(), + "Testclusterkind".equals(t.getKind().toString())); } finally { if (cluster != null) { cluster.close(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestYARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestYARNRunner.java index bacf164863..3bbd1324bd 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestYARNRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestYARNRunner.java @@ -177,8 +177,13 @@ public void testJobSubmissionFailure() throws Exception { @Test public void testResourceMgrDelegate() throws Exception { /* we not want a mock of resourcemgr deleagte */ - ClientRMProtocol clientRMProtocol = mock(ClientRMProtocol.class); - ResourceMgrDelegate delegate = new ResourceMgrDelegate(conf, clientRMProtocol); + final ClientRMProtocol clientRMProtocol = mock(ClientRMProtocol.class); + ResourceMgrDelegate delegate = new ResourceMgrDelegate(conf) { + @Override + public synchronized void start() { + this.rmClient = clientRMProtocol; + } + }; /* make sure kill calls finish application master */ when(clientRMProtocol.forceKillApplication(any(KillApplicationRequest.class))) .thenReturn(null); From b29cb2d99756c3ae56ab12fac38d95668b8eb2f1 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Tue, 28 Aug 2012 01:41:13 +0000 Subject: [PATCH 11/62] HDFS-3856. TestHDFSServerPorts failure is causing surefire fork failure. 
Contributed by Colin Patrick McCabe git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1377934 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../org/apache/hadoop/hdfs/server/namenode/NameNode.java | 6 +++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index db82a6c6f5..b39e94424f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -684,6 +684,9 @@ Branch-2 ( Unreleased changes ) HDFS-3683. Edit log replay progress indicator shows >100% complete. (Plamen Jeliazkov via atm) + HDFS-3856. TestHDFSServerPorts failure is causing surefire fork failure. + (Colin Patrick McCabe via eli) + BREAKDOWN OF HDFS-3042 SUBTASKS HDFS-2185. HDFS portion of ZK-based FailoverController (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 083b16be68..c1bca3b45b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -511,13 +511,13 @@ private void stopCommonServices() { } private void startTrashEmptier(Configuration conf) throws IOException { - long trashInterval = namesystem.getServerDefaults().getTrashInterval(); + long trashInterval = + conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT); if (trashInterval == 0) { return; } else if (trashInterval < 0) { throw new IOException("Cannot start tresh emptier with negative interval." - + " Set " + CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY + " to a" - + " positive value."); + + " Set " + FS_TRASH_INTERVAL_KEY + " to a positive value."); } this.emptier = new Thread(new Trash(conf).getEmptier(), "Trash Emptier"); this.emptier.setDaemon(true); From 36c53880804c1e7f25d54ed12364620ed33cea54 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Tue, 28 Aug 2012 01:45:26 +0000 Subject: [PATCH 12/62] Fixup CHANGELOG for HDFS-3856. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1377936 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index b39e94424f..28f0c46cd1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -685,7 +685,7 @@ Branch-2 ( Unreleased changes ) Jeliazkov via atm) HDFS-3856. TestHDFSServerPorts failure is causing surefire fork failure. - (Colin Patrick McCabe via eli) + (eli) BREAKDOWN OF HDFS-3042 SUBTASKS From cfe25b88282ad99ff7fe466f1ba88cd60dff0f6f Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Tue, 28 Aug 2012 02:03:26 +0000 Subject: [PATCH 13/62] MAPREDUCE-4579. Split TestTaskAttempt into two so as to pass tests on jdk7. Contributed by Thomas Graves. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1377943 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 + .../v2/app/job/impl/TestTaskAttempt.java | 119 ++------------ .../impl/TestTaskAttemptContainerRequest.java | 154 ++++++++++++++++++ 3 files changed, 173 insertions(+), 103 deletions(-) create mode 100644 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttemptContainerRequest.java diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 882f50f22c..5ba3f36737 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -237,6 +237,9 @@ Release 2.1.0-alpha - Unreleased MAPREDUCE-4580. Change MapReduce to use the yarn-client module. (Vinod Kumar Vavilapalli via sseth) + MAPREDUCE-4579. Split TestTaskAttempt into two so as to pass tests on + jdk7. (Thomas Graves via vinodkv) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java index 3eff0c1cfe..88e32b337c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java @@ -28,7 +28,6 @@ import java.io.IOException; import java.net.InetSocketAddress; -import java.util.Arrays; import java.util.HashMap; import java.util.Iterator; import java.util.Map; @@ -36,23 +35,17 @@ import junit.framework.Assert; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RawLocalFileSystem; -import org.apache.hadoop.io.DataInputByteBuffer; -import org.apache.hadoop.io.Text; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.MapTaskAttemptImpl; -import org.apache.hadoop.mapred.WrappedJvmID; import org.apache.hadoop.mapreduce.JobCounter; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.OutputCommitter; -import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent; import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletion; -import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier; import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.JobState; @@ -79,18 +72,14 @@ import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent; import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; import org.apache.hadoop.security.Credentials; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.yarn.Clock; import org.apache.hadoop.yarn.ClusterInfo; import org.apache.hadoop.yarn.SystemClock; -import 
org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; -import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.event.Event; @@ -101,82 +90,6 @@ @SuppressWarnings({"unchecked", "rawtypes"}) public class TestTaskAttempt{ - @Test - public void testAttemptContainerRequest() throws Exception { - //WARNING: This test must run first. This is because there is an - // optimization where the credentials passed in are cached statically so - // they do not need to be recomputed when creating a new - // ContainerLaunchContext. if other tests run first this code will cache - // their credentials and this test will fail trying to look for the - // credentials it inserted in. - final Text SECRET_KEY_ALIAS = new Text("secretkeyalias"); - final byte[] SECRET_KEY = ("secretkey").getBytes(); - Map acls = - new HashMap(1); - acls.put(ApplicationAccessType.VIEW_APP, "otheruser"); - ApplicationId appId = BuilderUtils.newApplicationId(1, 1); - JobId jobId = MRBuilderUtils.newJobId(appId, 1); - TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP); - Path jobFile = mock(Path.class); - - EventHandler eventHandler = mock(EventHandler.class); - TaskAttemptListener taListener = mock(TaskAttemptListener.class); - when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0)); - - JobConf jobConf = new JobConf(); - jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class); - jobConf.setBoolean("fs.file.impl.disable.cache", true); - jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, ""); - - // setup UGI for security so tokens and keys are preserved - jobConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); - UserGroupInformation.setConfiguration(jobConf); - - Credentials credentials = new Credentials(); - credentials.addSecretKey(SECRET_KEY_ALIAS, SECRET_KEY); - Token jobToken = new Token( - ("tokenid").getBytes(), ("tokenpw").getBytes(), - new Text("tokenkind"), new Text("tokenservice")); - - TaskAttemptImpl taImpl = - new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1, - mock(TaskSplitMetaInfo.class), jobConf, taListener, - mock(OutputCommitter.class), jobToken, credentials, - new SystemClock(), null); - - jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, taImpl.getID().toString()); - ContainerId containerId = BuilderUtils.newContainerId(1, 1, 1, 1); - - ContainerLaunchContext launchCtx = - TaskAttemptImpl.createContainerLaunchContext(acls, containerId, - jobConf, jobToken, taImpl.createRemoteTask(), - TypeConverter.fromYarn(jobId), mock(Resource.class), - mock(WrappedJvmID.class), taListener, - credentials); - - Assert.assertEquals("ACLs mismatch", acls, launchCtx.getApplicationACLs()); - Credentials launchCredentials = new Credentials(); - - DataInputByteBuffer dibb = new DataInputByteBuffer(); - dibb.reset(launchCtx.getContainerTokens()); - launchCredentials.readTokenStorageStream(dibb); - - // verify all tokens specified for the task attempt are in the launch context - for (Token token : credentials.getAllTokens()) { - Token launchToken = - launchCredentials.getToken(token.getService()); - Assert.assertNotNull("Token " + token.getService() + " is missing", - launchToken); - 
Assert.assertEquals("Token " + token.getService() + " mismatch", - token, launchToken); - } - - // verify the secret key is in the launch context - Assert.assertNotNull("Secret key missing", - launchCredentials.getSecretKey(SECRET_KEY_ALIAS)); - Assert.assertTrue("Secret key mismatch", Arrays.equals(SECRET_KEY, - launchCredentials.getSecretKey(SECRET_KEY_ALIAS))); - } static public class StubbedFS extends RawLocalFileSystem { @Override @@ -227,7 +140,7 @@ public void testSingleRackRequest() throws Exception { //Only a single occurrence of /DefaultRack assertEquals(1, requestedRacks.length); } - + @Test public void testHostResolveAttempt() throws Exception { TaskAttemptImpl.RequestContainerTransition rct = @@ -266,7 +179,7 @@ public void testHostResolveAttempt() throws Exception { } assertEquals(0, expected.size()); } - + @Test public void testSlotMillisCounterUpdate() throws Exception { verifySlotMillis(2048, 2048, 1024); @@ -325,13 +238,13 @@ public void verifySlotMillis(int mapMemMb, int reduceMemMb, .getAllCounters().findCounter(JobCounter.SLOTS_MILLIS_REDUCES) .getValue()); } - + private TaskAttemptImpl createMapTaskAttemptImplForTest( EventHandler eventHandler, TaskSplitMetaInfo taskSplitMetaInfo) { Clock clock = new SystemClock(); return createMapTaskAttemptImplForTest(eventHandler, taskSplitMetaInfo, clock); } - + private TaskAttemptImpl createMapTaskAttemptImplForTest( EventHandler eventHandler, TaskSplitMetaInfo taskSplitMetaInfo, Clock clock) { ApplicationId appId = BuilderUtils.newApplicationId(1, 1); @@ -402,30 +315,30 @@ public void handle(JobHistoryEvent event) { }; } } - + @Test public void testLaunchFailedWhileKilling() throws Exception { ApplicationId appId = BuilderUtils.newApplicationId(1, 2); - ApplicationAttemptId appAttemptId = + ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 0); JobId jobId = MRBuilderUtils.newJobId(appId, 1); TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP); TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0); Path jobFile = mock(Path.class); - + MockEventHandler eventHandler = new MockEventHandler(); TaskAttemptListener taListener = mock(TaskAttemptListener.class); when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0)); - + JobConf jobConf = new JobConf(); jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class); jobConf.setBoolean("fs.file.impl.disable.cache", true); jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, ""); jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10"); - + TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class); when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"}); - + TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1, splits, jobConf, taListener, @@ -437,7 +350,7 @@ public void testLaunchFailedWhileKilling() throws Exception { Container container = mock(Container.class); when(container.getId()).thenReturn(contId); when(container.getNodeId()).thenReturn(nid); - + taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE)); taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, @@ -450,7 +363,7 @@ public void testLaunchFailedWhileKilling() throws Exception { TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED)); assertFalse(eventHandler.internalError); } - + @Test public void testContainerCleanedWhileRunning() throws Exception { ApplicationId appId = BuilderUtils.newApplicationId(1, 2); @@ -565,7 +478,7 @@ public void testContainerCleanedWhileCommitting() throws 
Exception { assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED", eventHandler.internalError); } - + @Test public void testDoubleTooManyFetchFailure() throws Exception { ApplicationId appId = BuilderUtils.newApplicationId(1, 2); @@ -618,7 +531,7 @@ public void testDoubleTooManyFetchFailure() throws Exception { TaskAttemptEventType.TA_DONE)); taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_CLEANED)); - + assertEquals("Task attempt is not in succeeded state", taImpl.getState(), TaskAttemptState.SUCCEEDED); taImpl.handle(new TaskAttemptEvent(attemptId, @@ -635,7 +548,7 @@ public void testDoubleTooManyFetchFailure() throws Exception { public static class MockEventHandler implements EventHandler { public boolean internalError; - + @Override public void handle(Event event) { if (event instanceof JobEvent) { @@ -645,6 +558,6 @@ public void handle(Event event) { } } } - + }; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttemptContainerRequest.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttemptContainerRequest.java new file mode 100644 index 0000000000..fbd23e1555 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttemptContainerRequest.java @@ -0,0 +1,154 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapreduce.v2.app.job.impl; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; + +import junit.framework.Assert; + +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RawLocalFileSystem; +import org.apache.hadoop.io.DataInputByteBuffer; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.MapTaskAttemptImpl; +import org.apache.hadoop.mapred.WrappedJvmID; +import org.apache.hadoop.mapreduce.MRJobConfig; +import org.apache.hadoop.mapreduce.OutputCommitter; +import org.apache.hadoop.mapreduce.TypeConverter; +import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier; +import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo; +import org.apache.hadoop.mapreduce.v2.api.records.JobId; +import org.apache.hadoop.mapreduce.v2.api.records.TaskId; +import org.apache.hadoop.mapreduce.v2.api.records.TaskType; +import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener; +import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; +import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.yarn.SystemClock; +import org.apache.hadoop.yarn.api.records.ApplicationAccessType; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.event.EventHandler; +import org.apache.hadoop.yarn.util.BuilderUtils; +import org.junit.Test; + +@SuppressWarnings({"rawtypes"}) +public class TestTaskAttemptContainerRequest { + + //WARNING: This test must be the only test in this file. This is because + // there is an optimization where the credentials passed in are cached + // statically so they do not need to be recomputed when creating a new + // ContainerLaunchContext. if other tests run first this code will cache + // their credentials and this test will fail trying to look for the + // credentials it inserted in. 
+ + @Test + public void testAttemptContainerRequest() throws Exception { + final Text SECRET_KEY_ALIAS = new Text("secretkeyalias"); + final byte[] SECRET_KEY = ("secretkey").getBytes(); + Map acls = + new HashMap(1); + acls.put(ApplicationAccessType.VIEW_APP, "otheruser"); + ApplicationId appId = BuilderUtils.newApplicationId(1, 1); + JobId jobId = MRBuilderUtils.newJobId(appId, 1); + TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP); + Path jobFile = mock(Path.class); + + EventHandler eventHandler = mock(EventHandler.class); + TaskAttemptListener taListener = mock(TaskAttemptListener.class); + when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0)); + + JobConf jobConf = new JobConf(); + jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class); + jobConf.setBoolean("fs.file.impl.disable.cache", true); + jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, ""); + + // setup UGI for security so tokens and keys are preserved + jobConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + UserGroupInformation.setConfiguration(jobConf); + + Credentials credentials = new Credentials(); + credentials.addSecretKey(SECRET_KEY_ALIAS, SECRET_KEY); + Token jobToken = new Token( + ("tokenid").getBytes(), ("tokenpw").getBytes(), + new Text("tokenkind"), new Text("tokenservice")); + + TaskAttemptImpl taImpl = + new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1, + mock(TaskSplitMetaInfo.class), jobConf, taListener, + mock(OutputCommitter.class), jobToken, credentials, + new SystemClock(), null); + + jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, taImpl.getID().toString()); + ContainerId containerId = BuilderUtils.newContainerId(1, 1, 1, 1); + + ContainerLaunchContext launchCtx = + TaskAttemptImpl.createContainerLaunchContext(acls, containerId, + jobConf, jobToken, taImpl.createRemoteTask(), + TypeConverter.fromYarn(jobId), mock(Resource.class), + mock(WrappedJvmID.class), taListener, + credentials); + + Assert.assertEquals("ACLs mismatch", acls, launchCtx.getApplicationACLs()); + Credentials launchCredentials = new Credentials(); + + DataInputByteBuffer dibb = new DataInputByteBuffer(); + dibb.reset(launchCtx.getContainerTokens()); + launchCredentials.readTokenStorageStream(dibb); + + // verify all tokens specified for the task attempt are in the launch context + for (Token token : credentials.getAllTokens()) { + Token launchToken = + launchCredentials.getToken(token.getService()); + Assert.assertNotNull("Token " + token.getService() + " is missing", + launchToken); + Assert.assertEquals("Token " + token.getService() + " mismatch", + token, launchToken); + } + + // verify the secret key is in the launch context + Assert.assertNotNull("Secret key missing", + launchCredentials.getSecretKey(SECRET_KEY_ALIAS)); + Assert.assertTrue("Secret key mismatch", Arrays.equals(SECRET_KEY, + launchCredentials.getSecretKey(SECRET_KEY_ALIAS))); + } + + static public class StubbedFS extends RawLocalFileSystem { + @Override + public FileStatus getFileStatus(Path f) throws IOException { + return new FileStatus(1, false, 1, 1, 1, f); + } + } + +} From 7fff7b42bf1347a2cb5558ba4da08f19eff201e1 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Tue, 28 Aug 2012 03:31:11 +0000 Subject: [PATCH 14/62] YARN-31. Fix TestDelegationTokenRenewer to not depend on test order so as to pass tests on jdk7. Contributed by Thomas Graves. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1377961 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 +++ .../security/TestDelegationTokenRenewer.java | 9 ++++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 4b5c2ca0dc..9942271c64 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -67,3 +67,6 @@ Release 0.23.3 - Unreleased YARN-39. RM-NM secret-keys should be randomly generated and rolled every so often. (vinodkv and sseth via sseth) + + YARN-31. Fix TestDelegationTokenRenewer to not depend on test order so as to + pass tests on jdk7. (Thomas Graves via vinodkv) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java index 4184465451..1c3614e46d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java @@ -66,6 +66,12 @@ public static class Renewer extends TokenRenewer { private static Token lastRenewed = null; private static Token tokenToRenewIn2Sec = null; + private static void reset() { + counter = 0; + lastRenewed = null; + tokenToRenewIn2Sec = null; + } + @Override public boolean handleKind(Text kind) { return KIND.equals(kind); @@ -124,6 +130,7 @@ public static void setUpClass() throws Exception { @Before public void setUp() throws Exception { + Renewer.reset(); delegationTokenRenewer = new DelegationTokenRenewer(); delegationTokenRenewer.init(conf); delegationTokenRenewer.start(); @@ -367,7 +374,7 @@ public void testDTRenewalWithNoCancel () throws Exception { Credentials ts = new Credentials(); MyToken token1 = dfs.getDelegationToken(new Text("user1")); - + //to cause this one to be set for renew in 2 secs Renewer.tokenToRenewIn2Sec = token1; LOG.info("token="+token1+" should be renewed for 2 secs"); From e7b12c89e11d8339b1e508555720a21616e8bdbd Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Tue, 28 Aug 2012 13:05:31 +0000 Subject: [PATCH 15/62] HADOOP-8619. WritableComparator must implement no-arg constructor. Contributed by Chris Douglas. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1378120 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 ++ .../apache/hadoop/io/WritableComparator.java | 4 ++ .../serializer/TestWritableSerialization.java | 51 ++++++++++++++++++- 3 files changed, 56 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 2482aa7156..fdecdc2027 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -98,6 +98,9 @@ Trunk (unreleased changes) HADOOP-8719. Workaround for kerberos-related log errors upon running any hadoop command on OSX. (Jianbin Wei via harsh) + HADOOP-8619. WritableComparator must implement no-arg constructor. 
+ (Chris Douglas via Suresh) + BUG FIXES HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java index eb3c8d322c..9d4087f1cd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java @@ -87,6 +87,10 @@ public static synchronized void define(Class c, private final WritableComparable key2; private final DataInputBuffer buffer; + protected WritableComparator() { + this(null); + } + /** Construct for a {@link WritableComparable} implementation. */ protected WritableComparator(Class keyClass) { this(keyClass, false); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestWritableSerialization.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestWritableSerialization.java index 28e37add5e..7ef5749bfb 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestWritableSerialization.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestWritableSerialization.java @@ -18,25 +18,34 @@ package org.apache.hadoop.io.serializer; +import java.io.Serializable; + +import org.apache.hadoop.io.DataInputBuffer; +import org.apache.hadoop.io.DataOutputBuffer; import static org.apache.hadoop.io.TestGenericWritable.CONF_TEST_KEY; import static org.apache.hadoop.io.TestGenericWritable.CONF_TEST_VALUE; -import junit.framework.TestCase; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.TestGenericWritable.Baz; import org.apache.hadoop.io.TestGenericWritable.FooGenericWritable; +import org.apache.hadoop.io.WritableComparator; -public class TestWritableSerialization extends TestCase { +import org.junit.Test; +import static org.junit.Assert.*; + +public class TestWritableSerialization { private static final Configuration conf = new Configuration(); + @Test public void testWritableSerialization() throws Exception { Text before = new Text("test writable"); Text after = SerializationTestUtil.testSerialization(conf, before); assertEquals(before, after); } + @Test public void testWritableConfigurable() throws Exception { //set the configuration parameter @@ -52,4 +61,42 @@ public void testWritableConfigurable() throws Exception { assertEquals(baz, result); assertNotNull(result.getConf()); } + + @Test + @SuppressWarnings({"rawtypes", "unchecked"}) + public void testWritableComparatorJavaSerialization() throws Exception { + Serialization ser = new JavaSerialization(); + + Serializer serializer = ser.getSerializer(TestWC.class); + DataOutputBuffer dob = new DataOutputBuffer(); + serializer.open(dob); + TestWC orig = new TestWC(0); + serializer.serialize(orig); + serializer.close(); + + Deserializer deserializer = ser.getDeserializer(TestWC.class); + DataInputBuffer dib = new DataInputBuffer(); + dib.reset(dob.getData(), 0, dob.getLength()); + deserializer.open(dib); + TestWC deser = deserializer.deserialize(null); + deserializer.close(); + assertEquals(orig, deser); + } + + static class TestWC extends WritableComparator implements Serializable { + static final long serialVersionUID = 0x4344; + final int val; + TestWC() { this(7); } + 
TestWC(int val) { this.val = val; } + @Override + public boolean equals(Object o) { + if (o instanceof TestWC) { + return ((TestWC)o).val == val; + } + return false; + } + @Override + public int hashCode() { return val; } + } + } From 54d4630d9bfd68a8c2cfa4dbaf425f3d0e4ae8ce Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Tue, 28 Aug 2012 15:30:43 +0000 Subject: [PATCH 16/62] HADOOP-8738. junit JAR is showing up in the distro (tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1378175 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++ hadoop-mapreduce-project/pom.xml | 1 + hadoop-yarn-project/pom.xml | 1 + 3 files changed, 4 insertions(+) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index fdecdc2027..d89a33ee11 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -445,6 +445,8 @@ Branch-2 ( Unreleased changes ) HADOOP-8031. Configuration class fails to find embedded .jar resources; should use URL.openStream() (genman via tucu) + HADOOP-8738. junit JAR is showing up in the distro (tucu) + BREAKDOWN OF HDFS-3042 SUBTASKS HADOOP-8220. ZKFailoverController doesn't handle failure to become active diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml index b2279b647a..225a582dbe 100644 --- a/hadoop-mapreduce-project/pom.xml +++ b/hadoop-mapreduce-project/pom.xml @@ -149,6 +149,7 @@ junit junit + test org.jboss.netty diff --git a/hadoop-yarn-project/pom.xml b/hadoop-yarn-project/pom.xml index c1e38967c5..819519a74c 100644 --- a/hadoop-yarn-project/pom.xml +++ b/hadoop-yarn-project/pom.xml @@ -148,6 +148,7 @@ junit junit + test org.jboss.netty From 8d724dd8b97cc4c10c03742eb359796c2f51b7a9 Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Tue, 28 Aug 2012 17:01:38 +0000 Subject: [PATCH 17/62] HDFS-3860. HeartbeatManager#Monitor may wrongly hold the writelock of namesystem. Contributed by Jing Zhao. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1378228 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hdfs/server/blockmanagement/HeartbeatManager.java | 6 +++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 28f0c46cd1..42814c19fd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -687,6 +687,9 @@ Branch-2 ( Unreleased changes ) HDFS-3856. TestHDFSServerPorts failure is causing surefire fork failure. (eli) + HDFS-3860. HeartbeatManager#Monitor may wrongly hold the writelock of + namesystem. (Jing Zhao via atm) + BREAKDOWN OF HDFS-3042 SUBTASKS HDFS-2185. HDFS portion of ZK-based FailoverController (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java index 73926010a4..449619ced7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java @@ -223,10 +223,10 @@ void heartbeatCheck() { if (!allAlive) { // acquire the fsnamesystem lock, and then remove the dead node. 
namesystem.writeLock(); - if (namesystem.isInSafeMode()) { - return; - } try { + if (namesystem.isInSafeMode()) { + return; + } synchronized(this) { dm.removeDeadDatanode(dead); } From 963d01a0afbb34d7f2cd081fadf9240f5caed274 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Tue, 28 Aug 2012 18:29:17 +0000 Subject: [PATCH 18/62] MAPREDUCE-4600. TestTokenCache.java from MRV1 no longer compiles (daryn via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1378255 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 + .../mapreduce/security/TestTokenCache.java | 459 ------------------ 2 files changed, 3 insertions(+), 459 deletions(-) delete mode 100644 hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 5ba3f36737..89afe5c4d0 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -841,6 +841,9 @@ Release 0.23.3 - UNRELEASED MAPREDUCE-4570. ProcfsBasedProcessTree#constructProcessInfo() prints a warning if procfsDir//stat is not found. (Ahmed Radwan via bobby) + MAPREDUCE-4600. TestTokenCache.java from MRV1 no longer compiles (daryn + via bobby) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java deleted file mode 100644 index 293645a1d8..0000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java +++ /dev/null @@ -1,459 +0,0 @@ -/** Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.mapreduce.security; - - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.mockito.Mockito.mock; - -import java.io.File; -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.security.NoSuchAlgorithmException; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import javax.crypto.KeyGenerator; -import javax.crypto.spec.SecretKeySpec; - -import org.apache.commons.codec.binary.Base64; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.viewfs.ViewFileSystem; -import org.apache.hadoop.hdfs.HftpFileSystem; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; -import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager; -import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; -import org.apache.hadoop.io.IntWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.Master; -import org.apache.hadoop.mapred.MiniMRCluster; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.apache.hadoop.mapreduce.SleepJob; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.security.Credentials; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.tools.HadoopArchives; -import org.apache.hadoop.util.ToolRunner; -import org.codehaus.jackson.map.ObjectMapper; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; -import org.mockito.Mockito; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -public class TestTokenCache { - private static final int NUM_OF_KEYS = 10; - - // my sleep class - adds check for tokenCache - static class MySleepMapper extends SleepJob.SleepMapper { - /** - * attempts to access tokenCache as from client - */ - @Override - public void map(IntWritable key, IntWritable value, Context context) - throws IOException, InterruptedException { - // get token storage and a key - Credentials ts = context.getCredentials(); - byte[] key1 = ts.getSecretKey(new Text("alias1")); - Collection> dts = ts.getAllTokens(); - int dts_size = 0; - if(dts != null) - dts_size = dts.size(); - - - if(dts_size != 2) { // one job token and one delegation token - throw new RuntimeException("tokens are not available"); // fail the test - } - - - if(key1 == null || ts == null || ts.numberOfSecretKeys() != NUM_OF_KEYS) { - throw new RuntimeException("secret keys are not available"); // fail the test - } - super.map(key, value, context); - } - } - - class MySleepJob extends SleepJob { - @Override - public Job createJob(int numMapper, int numReducer, - long mapSleepTime, int mapSleepCount, - long reduceSleepTime, int reduceSleepCount) - throws 
IOException { - Job job = super.createJob(numMapper, numReducer, - mapSleepTime, mapSleepCount, - reduceSleepTime, reduceSleepCount); - - job.setMapperClass(MySleepMapper.class); - //Populate tokens here because security is disabled. - populateTokens(job); - return job; - } - - private void populateTokens(Job job) { - // Credentials in the job will not have delegation tokens - // because security is disabled. Fetch delegation tokens - // and populate the credential in the job. - try { - Credentials ts = job.getCredentials(); - Path p1 = new Path("file1"); - p1 = p1.getFileSystem(job.getConfiguration()).makeQualified(p1); - Credentials cred = new Credentials(); - TokenCache.obtainTokensForNamenodesInternal(cred, new Path[] { p1 }, - job.getConfiguration()); - for (Token t : cred.getAllTokens()) { - ts.addToken(new Text("Hdfs"), t); - } - } catch (IOException e) { - Assert.fail("Exception " + e); - } - } - } - - private static MiniMRCluster mrCluster; - private static MiniDFSCluster dfsCluster; - private static final Path TEST_DIR = - new Path(System.getProperty("test.build.data","/tmp"), "sleepTest"); - private static final Path tokenFileName = new Path(TEST_DIR, "tokenFile.json"); - private static int numSlaves = 1; - private static JobConf jConf; - private static ObjectMapper mapper = new ObjectMapper(); - private static Path p1; - private static Path p2; - - @BeforeClass - public static void setUp() throws Exception { - - Configuration conf = new Configuration(); - conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL, "RULE:[2:$1]"); - dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null); - jConf = new JobConf(conf); - mrCluster = new MiniMRCluster(0, 0, numSlaves, - dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null, - jConf); - - createTokenFileJson(); - verifySecretKeysInJSONFile(); - NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads(); - FileSystem fs = dfsCluster.getFileSystem(); - - p1 = new Path("file1"); - p2 = new Path("file2"); - - p1 = fs.makeQualified(p1); - } - - @AfterClass - public static void tearDown() throws Exception { - if(mrCluster != null) - mrCluster.shutdown(); - mrCluster = null; - if(dfsCluster != null) - dfsCluster.shutdown(); - dfsCluster = null; - } - - // create jason file and put some keys into it.. - private static void createTokenFileJson() throws IOException { - Map map = new HashMap(); - - try { - KeyGenerator kg = KeyGenerator.getInstance("HmacSHA1"); - for(int i=0; i map; - map = mapper.readValue(new File(tokenFileName.toString()), Map.class); - assertEquals("didn't read JSON correctly", map.size(), NUM_OF_KEYS); - } - - /** - * run a distributed job and verify that TokenCache is available - * @throws IOException - */ - @Test - public void testTokenCache() throws IOException { - - System.out.println("running dist job"); - - // make sure JT starts - jConf = mrCluster.createJobConf(); - - // provide namenodes names for the job to get the delegation tokens for - String nnUri = dfsCluster.getURI(0).toString(); - jConf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri); - // job tracker principla id.. 
- jConf.set(JTConfig.JT_USER_NAME, "jt_id/foo@BAR"); - - // using argument to pass the file name - String[] args = { - "-tokenCacheFile", tokenFileName.toString(), - "-m", "1", "-r", "1", "-mt", "1", "-rt", "1" - }; - - int res = -1; - try { - res = ToolRunner.run(jConf, new MySleepJob(), args); - } catch (Exception e) { - System.out.println("Job failed with" + e.getLocalizedMessage()); - e.printStackTrace(System.out); - fail("Job failed"); - } - assertEquals("dist job res is not 0", res, 0); - } - - /** - * run a local job and verify that TokenCache is available - * @throws NoSuchAlgorithmException - * @throws IOException - */ - @Test - public void testLocalJobTokenCache() throws NoSuchAlgorithmException, IOException { - - System.out.println("running local job"); - // this is local job - String[] args = {"-m", "1", "-r", "1", "-mt", "1", "-rt", "1"}; - jConf.set("mapreduce.job.credentials.json", tokenFileName.toString()); - - int res = -1; - try { - res = ToolRunner.run(jConf, new MySleepJob(), args); - } catch (Exception e) { - System.out.println("Job failed with" + e.getLocalizedMessage()); - e.printStackTrace(System.out); - fail("local Job failed"); - } - assertEquals("local job res is not 0", res, 0); - } - - @Test - public void testGetTokensForNamenodes() throws IOException { - - Credentials credentials = new Credentials(); - TokenCache.obtainTokensForNamenodesInternal(credentials, new Path[] { p1, - p2 }, jConf); - - // this token is keyed by hostname:port key. - String fs_addr = - SecurityUtil.buildDTServiceName(p1.toUri(), NameNode.DEFAULT_PORT); - Token nnt = (Token)TokenCache.getDelegationToken( - credentials, fs_addr); - System.out.println("dt for " + p1 + "(" + fs_addr + ")" + " = " + nnt); - assertNotNull("Token for nn is null", nnt); - - // verify the size - Collection> tns = credentials.getAllTokens(); - assertEquals("number of tokens is not 1", 1, tns.size()); - - boolean found = false; - for(Token t: tns) { - if(t.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND) && - t.getService().equals(new Text(fs_addr))) { - found = true; - } - assertTrue("didn't find token for " + p1 ,found); - } - } - - @Test - public void testGetTokensForHftpFS() throws IOException, URISyntaxException { - HftpFileSystem hfs = mock(HftpFileSystem.class); - - DelegationTokenSecretManager dtSecretManager = - NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()); - String renewer = "renewer"; - jConf.set(JTConfig.JT_USER_NAME,renewer); - DelegationTokenIdentifier dtId = - new DelegationTokenIdentifier(new Text("user"), new Text(renewer), null); - final Token t = - new Token(dtId, dtSecretManager); - - final URI uri = new URI("hftp://127.0.0.1:2222/file1"); - final String fs_addr = - SecurityUtil.buildDTServiceName(uri, NameNode.DEFAULT_PORT); - t.setService(new Text(fs_addr)); - - //when(hfs.getUri()).thenReturn(uri); - Mockito.doAnswer(new Answer(){ - @Override - public URI answer(InvocationOnMock invocation) - throws Throwable { - return uri; - }}).when(hfs).getUri(); - - //when(hfs.getDelegationToken()).thenReturn((Token) t); - Mockito.doAnswer(new Answer>(){ - @Override - public Token answer(InvocationOnMock invocation) - throws Throwable { - return t; - }}).when(hfs).getDelegationToken(renewer); - - //when(hfs.getDelegationTokens()).thenReturn((Token) t); - Mockito.doAnswer(new Answer>>(){ - @Override - public List> answer(InvocationOnMock invocation) - throws Throwable { - return Collections.singletonList(t); - }}).when(hfs).getDelegationTokens(renewer); - - 
//when(hfs.getCanonicalServiceName).thenReturn(fs_addr); - Mockito.doAnswer(new Answer(){ - @Override - public String answer(InvocationOnMock invocation) - throws Throwable { - return fs_addr; - }}).when(hfs).getCanonicalServiceName(); - - Credentials credentials = new Credentials(); - Path p = new Path(uri.toString()); - System.out.println("Path for hftp="+ p + "; fs_addr="+fs_addr + "; rn=" + renewer); - TokenCache.obtainTokensForNamenodesInternal(hfs, credentials, jConf); - - Collection> tns = credentials.getAllTokens(); - assertEquals("number of tokens is not 1", 1, tns.size()); - - boolean found = false; - for(Token tt: tns) { - System.out.println("token="+tt); - if(tt.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND) && - tt.getService().equals(new Text(fs_addr))) { - found = true; - assertEquals("different token", tt, t); - } - assertTrue("didn't find token for " + p, found); - } - } - - /** - * verify _HOST substitution - * @throws IOException - */ - @Test - public void testGetJTPrincipal() throws IOException { - String serviceName = "jt/"; - String hostName = "foo"; - String domainName = "@BAR"; - Configuration conf = new Configuration(); - conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME); - conf.set(JTConfig.JT_IPC_ADDRESS, hostName + ":8888"); - conf.set(JTConfig.JT_USER_NAME, serviceName + SecurityUtil.HOSTNAME_PATTERN - + domainName); - assertEquals("Failed to substitute HOSTNAME_PATTERN with hostName", - serviceName + hostName + domainName, Master.getMasterPrincipal(conf)); - } - - @Test - public void testGetTokensForViewFS() throws IOException, URISyntaxException { - Configuration conf = new Configuration(jConf); - FileSystem dfs = dfsCluster.getFileSystem(); - String serviceName = dfs.getCanonicalServiceName(); - - Path p1 = new Path("/mount1"); - Path p2 = new Path("/mount2"); - p1 = dfs.makeQualified(p1); - p2 = dfs.makeQualified(p2); - - conf.set("fs.viewfs.mounttable.default.link./dir1", p1.toString()); - conf.set("fs.viewfs.mounttable.default.link./dir2", p2.toString()); - Credentials credentials = new Credentials(); - Path lp1 = new Path("viewfs:///dir1"); - Path lp2 = new Path("viewfs:///dir2"); - Path[] paths = new Path[2]; - paths[0] = lp1; - paths[1] = lp2; - TokenCache.obtainTokensForNamenodesInternal(credentials, paths, conf); - - Collection> tns = - credentials.getAllTokens(); - assertEquals("number of tokens is not 1", 1, tns.size()); - - boolean found = false; - for (Token tt : tns) { - System.out.println("token=" + tt); - if (tt.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND) - && tt.getService().equals(new Text(serviceName))) { - found = true; - } - assertTrue("didn't find token for [" + lp1 + ", " + lp2 + "]", found); - } - } - - @Test - public void testGetTokensForUriWithoutAuth() throws IOException { - FileSystem fs = dfsCluster.getFileSystem(); - HadoopArchives har = new HadoopArchives(jConf); - Path archivePath = new Path(fs.getHomeDirectory(), "tmp"); - String[] args = new String[6]; - args[0] = "-archiveName"; - args[1] = "foo1.har"; - args[2] = "-p"; - args[3] = fs.getHomeDirectory().toString(); - args[4] = "test"; - args[5] = archivePath.toString(); - try { - int ret = ToolRunner.run(har, args); - } catch (Exception e) { - fail("Could not create har file"); - } - Path finalPath = new Path(archivePath, "foo1.har"); - Path filePath = new Path(finalPath, "test"); - - Credentials credentials = new Credentials(); - TokenCache.obtainTokensForNamenodesInternal( - credentials, new Path [] {finalPath}, 
jConf); - } -} From d4d2bf73a9181a5bfdc0fd99328c7ee4ec998b4e Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Tue, 28 Aug 2012 22:09:27 +0000 Subject: [PATCH 19/62] HDFS-3849. When re-loading the FSImage, we should clear the existing genStamp and leases. Contributed by Colin Patrick McCabe. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1378364 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../hadoop/hdfs/server/namenode/FSImage.java | 5 +- .../hdfs/server/namenode/FSNamesystem.java | 17 ++++++ .../hdfs/server/namenode/LeaseManager.java | 6 +++ .../server/namenode/SecondaryNameNode.java | 5 ++ .../hdfs/server/namenode/TestCheckpoint.java | 53 +++++++++++++++++++ .../server/namenode/TestFSNamesystem.java | 19 +++++++ 7 files changed, 105 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 42814c19fd..55956076e5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -690,6 +690,9 @@ Branch-2 ( Unreleased changes ) HDFS-3860. HeartbeatManager#Monitor may wrongly hold the writelock of namesystem. (Jing Zhao via atm) + HDFS-3849. When re-loading the FSImage, we should clear the existing + genStamp and leases. (Colin Patrick McCabe via atm) + BREAKDOWN OF HDFS-3042 SUBTASKS HDFS-2185. HDFS portion of ZK-based FailoverController (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index 3f941f05ea..6511285b13 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; +import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable; @@ -555,9 +556,7 @@ void openEditLogForWrite() throws IOException { * file. 
*/ void reloadFromImageFile(File file, FSNamesystem target) throws IOException { - target.dir.reset(); - target.dtSecretManager.reset(); - + target.clear(); LOG.debug("Reloading namespace from " + file); loadFSImage(file, target, null); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index f5125923e9..0a5a550b72 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -365,6 +365,23 @@ private static final void logAuditEvent(boolean succeeded, private final boolean haEnabled; + /** + * Clear all loaded data + */ + void clear() { + dir.reset(); + dtSecretManager.reset(); + generationStamp.setStamp(GenerationStamp.FIRST_VALID_STAMP); + leaseManager.removeAllLeases(); + } + + @VisibleForTesting + LeaseManager getLeaseManager() { + return leaseManager; + } + + /** + /** * Instantiates an FSNamesystem loaded from the image and edits * directories specified in the passed Configuration. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java index d77c6b5ef1..88b7a2a13f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java @@ -159,6 +159,12 @@ synchronized void removeLease(String holder, String src) { } } + synchronized void removeAllLeases() { + sortedLeases.clear(); + sortedLeasesByPath.clear(); + leases.clear(); + } + /** * Reassign lease for file src to the new holder. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java index 3b4d6d3907..9a44b1ec8a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java @@ -140,6 +140,11 @@ public String toString() { FSImage getFSImage() { return checkpointImage; } + + @VisibleForTesting + FSNamesystem getFSNamesystem() { + return namesystem; + } @VisibleForTesting void setFSImage(CheckpointStorage image) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java index 38479e0358..58fbfec31d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java @@ -1924,6 +1924,59 @@ public void testSecondaryNameNodeWithDelegationTokens() throws IOException { } } } + + /** + * Regression test for HDFS-3849. This makes sure that when we re-load the + * FSImage in the 2NN, we clear the existing leases. 
+ */ + @Test + public void testSecondaryNameNodeWithSavedLeases() throws IOException { + MiniDFSCluster cluster = null; + SecondaryNameNode secondary = null; + FSDataOutputStream fos = null; + Configuration conf = new HdfsConfiguration(); + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes) + .format(true).build(); + FileSystem fs = cluster.getFileSystem(); + fos = fs.create(new Path("tmpfile")); + fos.write(new byte[] { 0, 1, 2, 3 }); + fos.hflush(); + assertEquals(1, cluster.getNamesystem().getLeaseManager().countLease()); + + secondary = startSecondaryNameNode(conf); + assertEquals(0, secondary.getFSNamesystem().getLeaseManager().countLease()); + + // Checkpoint once, so the 2NN loads the lease into its in-memory sate. + secondary.doCheckpoint(); + assertEquals(1, secondary.getFSNamesystem().getLeaseManager().countLease()); + fos.close(); + fos = null; + + // Perform a saveNamespace, so that the NN has a new fsimage, and the 2NN + // therefore needs to download a new fsimage the next time it performs a + // checkpoint. + cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER); + cluster.getNameNodeRpc().saveNamespace(); + cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE); + + // Ensure that the 2NN can still perform a checkpoint. + secondary.doCheckpoint(); + + // And the leases have been cleared... + assertEquals(0, secondary.getFSNamesystem().getLeaseManager().countLease()); + } finally { + if (fos != null) { + fos.close(); + } + if (secondary != null) { + secondary.shutdown(); + } + if (cluster != null) { + cluster.shutdown(); + } + } + } @Test public void testCommandLineParsing() throws ParseException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java index f0c5c688f4..4a66cba7b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java @@ -26,6 +26,9 @@ import java.util.Collection; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.junit.Test; public class TestFSNamesystem { @@ -45,4 +48,20 @@ public void testUniqueEditDirs() throws IOException { assertEquals(2, editsDirs.size()); } + /** + * Test that FSNamesystem#clear clears all leases. + */ + @Test + public void testFSNamespaceClearLeases() throws Exception { + Configuration conf = new HdfsConfiguration(); + NameNode.initMetrics(conf, NamenodeRole.NAMENODE); + DFSTestUtil.formatNameNode(conf); + FSNamesystem fsn = FSNamesystem.loadFromDisk(conf); + LeaseManager leaseMan = fsn.getLeaseManager(); + leaseMan.addLease("client1", "importantFile"); + assertEquals(1, leaseMan.countLease()); + fsn.clear(); + leaseMan = fsn.getLeaseManager(); + assertEquals(0, leaseMan.countLease()); + } } From b2d186d865a878ecfdb25a6b53f9b3275b9a693c Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Wed, 29 Aug 2012 01:19:07 +0000 Subject: [PATCH 20/62] HDFS-3864. NN does not update internal file mtime for OP_CLOSE when reading from the edit log. Contributed by Aaron T. Myers. 
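The change below makes FSEditLogLoader restore a file's mtime and atime when it replays OP_CLOSE, and adds a regression test for it. As a minimal illustration of the scenario (a sketch assuming a MiniDFSCluster test classpath; it is not part of the patch), the modification time observed after close() should survive a NameNode restart:

    // Sketch only: mirrors the regression test added by this patch.
    import java.io.OutputStream;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class ModTimeAfterRestartSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          FileSystem fs = cluster.getFileSystem();
          Path p = new Path("/test");

          OutputStream out = fs.create(p);
          long initial = fs.getFileStatus(p).getModificationTime();

          Thread.sleep(10);  // let some wall-clock time pass before the close
          out.close();       // the close is logged as OP_CLOSE with a new mtime
          long afterClose = fs.getFileStatus(p).getModificationTime();

          cluster.restartNameNode();  // forces the edit log to be replayed
          long afterRestart = fs.getFileStatus(p).getModificationTime();

          // Before this fix the replayed OP_CLOSE did not update the mtime, so
          // the two values could differ; with the fix they must be equal.
          System.out.println("initial=" + initial + " afterClose=" + afterClose
              + " afterRestart=" + afterRestart
              + " preserved=" + (afterClose == afterRestart));
        } finally {
          cluster.shutdown();
        }
      }
    }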
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1378413 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../hdfs/server/namenode/FSEditLogLoader.java | 4 +- .../org/apache/hadoop/hdfs/TestModTime.java | 43 +++++++++++++++++++ 3 files changed, 49 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 55956076e5..248c133967 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -693,6 +693,9 @@ Branch-2 ( Unreleased changes ) HDFS-3849. When re-loading the FSImage, we should clear the existing genStamp and leases. (Colin Patrick McCabe via atm) + HDFS-3864. NN does not update internal file mtime for OP_CLOSE when reading + from the edit log. (atm) + BREAKDOWN OF HDFS-3042 SUBTASKS HDFS-2185. HDFS portion of ZK-based FailoverController (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 78b28a9588..5a874fc136 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -302,7 +302,9 @@ private void applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, addCloseOp.path); } - // Update in-memory data structures + // Update the salient file attributes. + oldFile.setAccessTime(addCloseOp.atime); + oldFile.setModificationTimeForce(addCloseOp.mtime); updateBlocks(fsDir, addCloseOp, oldFile); // Now close the file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java index f30de965fc..e5ef1acfec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; +import java.io.OutputStream; import java.net.InetSocketAddress; import java.util.Random; @@ -32,12 +33,14 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.util.ThreadUtil; import org.junit.Test; /** * This class tests the decommissioning of nodes. */ public class TestModTime { + static final long seed = 0xDEADBEEFL; static final int blockSize = 8192; static final int fileSize = 16384; @@ -186,6 +189,46 @@ public void testModTime() throws IOException { cluster.shutdown(); } } + + /** + * Regression test for HDFS-3864 - NN does not update internal file mtime for + * OP_CLOSE when reading from the edit log. + */ + @Test + public void testModTimePersistsAfterRestart() throws IOException { + final long sleepTime = 10; // 10 milliseconds + MiniDFSCluster cluster = null; + FileSystem fs = null; + Configuration conf = new HdfsConfiguration(); + try { + cluster = new MiniDFSCluster.Builder(conf).build(); + fs = cluster.getFileSystem(); + Path testPath = new Path("/test"); + + // Open a file, and get its initial modification time. 
+ OutputStream out = fs.create(testPath); + long initialModTime = fs.getFileStatus(testPath).getModificationTime(); + assertTrue(initialModTime > 0); + + // Wait and then close the file. Ensure that the mod time goes up. + ThreadUtil.sleepAtLeastIgnoreInterrupts(sleepTime); + out.close(); + long modTimeAfterClose = fs.getFileStatus(testPath).getModificationTime(); + assertTrue(modTimeAfterClose >= initialModTime + sleepTime); + + // Restart the NN, and make sure that the later mod time is still used. + cluster.restartNameNode(); + long modTimeAfterRestart = fs.getFileStatus(testPath).getModificationTime(); + assertEquals(modTimeAfterClose, modTimeAfterRestart); + } finally { + if (fs != null) { + fs.close(); + } + if (cluster != null) { + cluster.shutdown(); + } + } + } public static void main(String[] args) throws Exception { new TestModTime().testModTime(); From e5b1b7ddd294ee26658621ec54734ee03dfb9956 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Wed, 29 Aug 2012 05:30:06 +0000 Subject: [PATCH 21/62] HADOOP-8737. cmake: always use JAVA_HOME to find libjvm.so, jni.h, jni_md.h. Contributed by Colin Patrick McCabe git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1378444 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 ++ .../hadoop-common/src/JNIFlags.cmake | 47 ++++++++++++++++++- 2 files changed, 49 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index d89a33ee11..e42069dbf8 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -447,6 +447,9 @@ Branch-2 ( Unreleased changes ) HADOOP-8738. junit JAR is showing up in the distro (tucu) + HADOOP-8737. cmake: always use JAVA_HOME to find libjvm.so, jni.h, jni_md.h. + (Colin Patrick McCabe via eli) + BREAKDOWN OF HDFS-3042 SUBTASKS HADOOP-8220. ZKFailoverController doesn't handle failure to become active diff --git a/hadoop-common-project/hadoop-common/src/JNIFlags.cmake b/hadoop-common-project/hadoop-common/src/JNIFlags.cmake index 9ed2bf559f..b1b6520de6 100644 --- a/hadoop-common-project/hadoop-common/src/JNIFlags.cmake +++ b/hadoop-common-project/hadoop-common/src/JNIFlags.cmake @@ -65,4 +65,49 @@ if (CMAKE_SYSTEM_PROCESSOR MATCHES "^arm" AND CMAKE_SYSTEM_NAME STREQUAL "Linux" endif (READELF MATCHES "NOTFOUND") endif (CMAKE_SYSTEM_PROCESSOR MATCHES "^arm" AND CMAKE_SYSTEM_NAME STREQUAL "Linux") -find_package(JNI REQUIRED) +IF("${CMAKE_SYSTEM}" MATCHES "Linux") + # + # Locate JNI_INCLUDE_DIRS and JNI_LIBRARIES. + # Since we were invoked from Maven, we know that the JAVA_HOME environment + # variable is valid. So we ignore system paths here and just use JAVA_HOME. 
+ # + FILE(TO_CMAKE_PATH "$ENV{JAVA_HOME}" _JAVA_HOME) + IF(CMAKE_SYSTEM_PROCESSOR MATCHES "^i.86$") + SET(_java_libarch "i386") + ELSEIF (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64") + SET(_java_libarch "amd64") + ELSE() + SET(_java_libarch ${CMAKE_SYSTEM_PROCESSOR}) + ENDIF() + SET(_JDK_DIRS "${_JAVA_HOME}/jre/lib/${_java_libarch}/*" + "${_JAVA_HOME}/jre/lib/${_java_libarch}" + "${_JAVA_HOME}/jre/lib/*" + "${_JAVA_HOME}/jre/lib" + "${_JAVA_HOME}/lib/*" + "${_JAVA_HOME}/lib" + "${_JAVA_HOME}/include/*" + "${_JAVA_HOME}/include" + "${_JAVA_HOME}" + ) + FIND_PATH(JAVA_INCLUDE_PATH + NAMES jni.h + PATHS ${_JDK_DIRS} + NO_DEFAULT_PATH) + FIND_PATH(JAVA_INCLUDE_PATH2 + NAMES jni_md.h + PATHS ${_JDK_DIRS} + NO_DEFAULT_PATH) + SET(JNI_INCLUDE_DIRS ${JAVA_INCLUDE_PATH} ${JAVA_INCLUDE_PATH2}) + FIND_LIBRARY(JAVA_JVM_LIBRARY + NAMES jvm JavaVM + PATHS ${_JDK_DIRS} + NO_DEFAULT_PATH) + SET(JNI_LIBRARIES ${JAVA_JVM_LIBRARY}) + IF((NOT JAVA_JVM_LIBRARY) OR (NOT JAVA_INCLUDE_PATH) OR (NOT JAVA_INCLUDE_PATH2)) + MESSAGE("JAVA_HOME=${JAVA_HOME}, JAVA_JVM_LIBRARY=${JAVA_JVM_LIBRARY}") + MESSAGE("JAVA_INCLUDE_PATH=${JAVA_INCLUDE_PATH}, JAVA_INCLUDE_PATH2=${JAVA_INCLUDE_PATH2}") + MESSAGE(FATAL_ERROR "Failed to find a viable JVM installation under JAVA_HOME.") + ENDIF() +ELSE() + find_package(JNI REQUIRED) +ENDIF() From 8449526f0c9e2807f6979391906b3eabb3bd009b Mon Sep 17 00:00:00 2001 From: Arun Murthy Date: Wed, 29 Aug 2012 16:51:33 +0000 Subject: [PATCH 22/62] Moved MAPREDUCE-4323 to YARN-58 and updated CHANGES.txt to reflect this. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1378637 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 9942271c64..50538dc6a1 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -63,7 +63,7 @@ Release 0.23.3 - Unreleased YARN-27. Failed refreshQueues due to misconfiguration prevents further refreshing of queues (Arun Murthy via tgraves) - MAPREDUCE-4323. NM leaks filesystems (Jason Lowe via jeagles) + YARN-58. NM leaks filesystems (Jason Lowe via jeagles) YARN-39. RM-NM secret-keys should be randomly generated and rolled every so often. (vinodkv and sseth via sseth) From a310557da8f8027243633e70b8885dfeb12c7aa2 Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Wed, 29 Aug 2012 23:40:51 +0000 Subject: [PATCH 23/62] HADOOP-8747. Syntax error on cmake version 2.6 patch 2 in JNIFlags.cmake. (cmccabe via tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1378770 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++ hadoop-common-project/hadoop-common/src/JNIFlags.cmake | 8 +++++--- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index e42069dbf8..2eee1d4594 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -450,6 +450,8 @@ Branch-2 ( Unreleased changes ) HADOOP-8737. cmake: always use JAVA_HOME to find libjvm.so, jni.h, jni_md.h. (Colin Patrick McCabe via eli) + HADOOP-8747. Syntax error on cmake version 2.6 patch 2 in JNIFlags.cmake. (cmccabe via tucu) + BREAKDOWN OF HDFS-3042 SUBTASKS HADOOP-8220. 
ZKFailoverController doesn't handle failure to become active diff --git a/hadoop-common-project/hadoop-common/src/JNIFlags.cmake b/hadoop-common-project/hadoop-common/src/JNIFlags.cmake index b1b6520de6..617fccd2b7 100644 --- a/hadoop-common-project/hadoop-common/src/JNIFlags.cmake +++ b/hadoop-common-project/hadoop-common/src/JNIFlags.cmake @@ -103,9 +103,11 @@ IF("${CMAKE_SYSTEM}" MATCHES "Linux") PATHS ${_JDK_DIRS} NO_DEFAULT_PATH) SET(JNI_LIBRARIES ${JAVA_JVM_LIBRARY}) - IF((NOT JAVA_JVM_LIBRARY) OR (NOT JAVA_INCLUDE_PATH) OR (NOT JAVA_INCLUDE_PATH2)) - MESSAGE("JAVA_HOME=${JAVA_HOME}, JAVA_JVM_LIBRARY=${JAVA_JVM_LIBRARY}") - MESSAGE("JAVA_INCLUDE_PATH=${JAVA_INCLUDE_PATH}, JAVA_INCLUDE_PATH2=${JAVA_INCLUDE_PATH2}") + MESSAGE("JAVA_HOME=${JAVA_HOME}, JAVA_JVM_LIBRARY=${JAVA_JVM_LIBRARY}") + MESSAGE("JAVA_INCLUDE_PATH=${JAVA_INCLUDE_PATH}, JAVA_INCLUDE_PATH2=${JAVA_INCLUDE_PATH2}") + IF(JAVA_JVM_LIBRARY AND JAVA_INCLUDE_PATH AND JAVA_INCLUDE_PATH2) + MESSAGE("Located all JNI components successfully.") + ELSE() MESSAGE(FATAL_ERROR "Failed to find a viable JVM installation under JAVA_HOME.") ENDIF() ELSE() From 95af35161553cb07c1624caaadaacb131a025c11 Mon Sep 17 00:00:00 2001 From: Thomas White Date: Thu, 30 Aug 2012 09:57:45 +0000 Subject: [PATCH 24/62] MAPREDUCE-4608. hadoop-mapreduce-client is missing some dependencies. Contributed by Alejandro Abdelnur. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1378873 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../hadoop-mapreduce-client/pom.xml | 26 ++++++++++++++++++- 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 89afe5c4d0..b6ff5808a7 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -184,6 +184,9 @@ Branch-2 ( Unreleased changes ) MAPREDUCE-4470. Fix TestCombineFileInputFormat.testForEmptyFile (ikatsov via tucu) + MAPREDUCE-4608. hadoop-mapreduce-client is missing some dependencies. + (tucu via tomwhite) + Release 2.1.0-alpha - Unreleased INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml index f936762c51..9e6a03e9d0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml @@ -133,7 +133,31 @@ org.jboss.netty netty - + + commons-logging + commons-logging + provided + + + com.google.guava + guava + provided + + + commons-codec + commons-codec + provided + + + commons-cli + commons-cli + provided + + + commons-lang + commons-lang + provided + From ec94ea265825b1fcb191e93a5435abfd96a2d8bc Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Thu, 30 Aug 2012 14:50:18 +0000 Subject: [PATCH 25/62] HADOOP-8748. Refactor DFSClient retry utility methods to a new class in org.apache.hadoop.io.retry. 
Contributed by Arun C Murthy git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1378969 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 + .../apache/hadoop/io/retry/RetryUtils.java | 164 ++++++++++++++++++ 2 files changed, 167 insertions(+) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 2eee1d4594..6bd92f8af6 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -320,6 +320,9 @@ Branch-2 ( Unreleased changes ) HADOOP-8075. Lower native-hadoop library log from info to debug. (Hızır Sefa İrken via eli) + HADOOP-8748. Refactor DFSClient retry utility methods to a new class + in org.apache.hadoop.io.retry. (Arun C Murthy via szetszwo) + BUG FIXES HADOOP-8372. NetUtils.normalizeHostName() incorrectly handles hostname diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java new file mode 100644 index 0000000000..2a65d16fdb --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java @@ -0,0 +1,164 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.io.retry; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.RemoteException; + +import com.google.protobuf.ServiceException; + +public class RetryUtils { + public static final Log LOG = LogFactory.getLog(RetryUtils.class); + + /** + * Return the default retry policy set in conf. + * + * If the value retryPolicyEnabledKey is set to false in conf, + * use TRY_ONCE_THEN_FAIL. + * + * Otherwise, get the MultipleLinearRandomRetry policy specified in the conf + * and then + * (1) use multipleLinearRandomRetry for + * - remoteExceptionToRetry, or + * - IOException other than RemoteException, or + * - ServiceException; and + * (2) use TRY_ONCE_THEN_FAIL for + * - non-remoteExceptionToRetry RemoteException, or + * - non-IOException. + * + * + * @param conf + * @param retryPolicyEnabledKey conf property key for enabling retry + * @param defaultRetryPolicyEnabled default retryPolicyEnabledKey conf value + * @param retryPolicySpecKey conf property key for retry policy spec + * @param defaultRetryPolicySpec default retryPolicySpecKey conf value + * @param remoteExceptionToRetry The particular RemoteException to retry + * @return the default retry policy. 
+ */ + public static RetryPolicy getDefaultRetryPolicy( + Configuration conf, + String retryPolicyEnabledKey, + boolean defaultRetryPolicyEnabled, + String retryPolicySpecKey, + String defaultRetryPolicySpec, + final Class remoteExceptionToRetry + ) { + + final RetryPolicy multipleLinearRandomRetry = + getMultipleLinearRandomRetry( + conf, + retryPolicyEnabledKey, defaultRetryPolicyEnabled, + retryPolicySpecKey, defaultRetryPolicySpec + ); + + if (LOG.isDebugEnabled()) { + LOG.debug("multipleLinearRandomRetry = " + multipleLinearRandomRetry); + } + + if (multipleLinearRandomRetry == null) { + //no retry + return RetryPolicies.TRY_ONCE_THEN_FAIL; + } else { + return new RetryPolicy() { + @Override + public RetryAction shouldRetry(Exception e, int retries, int failovers, + boolean isMethodIdempotent) throws Exception { + if (e instanceof ServiceException) { + //unwrap ServiceException + final Throwable cause = e.getCause(); + if (cause != null && cause instanceof Exception) { + e = (Exception)cause; + } + } + + //see (1) and (2) in the javadoc of this method. + final RetryPolicy p; + if (e instanceof RemoteException) { + final RemoteException re = (RemoteException)e; + p = remoteExceptionToRetry.getName().equals(re.getClassName())? + multipleLinearRandomRetry: RetryPolicies.TRY_ONCE_THEN_FAIL; + } else if (e instanceof IOException || e instanceof ServiceException) { + p = multipleLinearRandomRetry; + } else { //non-IOException + p = RetryPolicies.TRY_ONCE_THEN_FAIL; + } + + if (LOG.isDebugEnabled()) { + LOG.debug("RETRY " + retries + ") policy=" + + p.getClass().getSimpleName() + ", exception=" + e); + } + LOG.info("RETRY " + retries + ") policy=" + + p.getClass().getSimpleName() + ", exception=" + e); + return p.shouldRetry(e, retries, failovers, isMethodIdempotent); + } + + @Override + public String toString() { + return "RetryPolicy[" + multipleLinearRandomRetry + ", " + + RetryPolicies.TRY_ONCE_THEN_FAIL.getClass().getSimpleName() + + "]"; + } + }; + } + } + + /** + * Return the MultipleLinearRandomRetry policy specified in the conf, + * or null if the feature is disabled. + * If the policy is specified in the conf but the policy cannot be parsed, + * the default policy is returned. + * + * Retry policy spec: + * N pairs of sleep-time and number-of-retries "s1,n1,s2,n2,..." + * + * @param conf + * @param retryPolicyEnabledKey conf property key for enabling retry + * @param defaultRetryPolicyEnabled default retryPolicyEnabledKey conf value + * @param retryPolicySpecKey conf property key for retry policy spec + * @param defaultRetryPolicySpec default retryPolicySpecKey conf value + * @return the MultipleLinearRandomRetry policy specified in the conf, + * or null if the feature is disabled. + */ + public static RetryPolicy getMultipleLinearRandomRetry( + Configuration conf, + String retryPolicyEnabledKey, + boolean defaultRetryPolicyEnabled, + String retryPolicySpecKey, + String defaultRetryPolicySpec + ) { + final boolean enabled = + conf.getBoolean(retryPolicyEnabledKey, defaultRetryPolicyEnabled); + if (!enabled) { + return null; + } + + final String policy = conf.get(retryPolicySpecKey, defaultRetryPolicySpec); + + final RetryPolicy r = + RetryPolicies.MultipleLinearRandomRetry.parseCommaSeparatedString( + policy); + return (r != null) ? 
+ r : + RetryPolicies.MultipleLinearRandomRetry.parseCommaSeparatedString( + defaultRetryPolicySpec); + } +} From 2f1a133dcf8dc9e629a5c2829b1a116d66db4944 Mon Sep 17 00:00:00 2001 From: Thomas White Date: Thu, 30 Aug 2012 17:12:14 +0000 Subject: [PATCH 26/62] MAPREDUCE-4610. Support deprecated mapreduce.job.counters.limit property in MR2. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379022 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../java/org/apache/hadoop/mapreduce/counters/Limits.java | 4 +++- .../java/org/apache/hadoop/mapreduce/util/ConfigUtil.java | 2 ++ 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index b6ff5808a7..72786c396f 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -187,6 +187,9 @@ Branch-2 ( Unreleased changes ) MAPREDUCE-4608. hadoop-mapreduce-client is missing some dependencies. (tucu via tomwhite) + MAPREDUCE-4610. Support deprecated mapreduce.job.counters.limit property in + MR2. (tomwhite) + Release 2.1.0-alpha - Unreleased INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java index d22ac7011a..b82ebc65dc 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java @@ -20,12 +20,14 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapred.JobConf; + import static org.apache.hadoop.mapreduce.MRJobConfig.*; @InterfaceAudience.Private public class Limits { - static final Configuration conf = new Configuration(); + static final Configuration conf = new JobConf(); public static final int GROUP_NAME_MAX = conf.getInt(COUNTER_GROUP_NAME_MAX_KEY, COUNTER_GROUP_NAME_MAX_DEFAULT); public static final int COUNTER_NAME_MAX = diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java index f5dbc3a7f1..dbbd786edf 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java @@ -376,6 +376,8 @@ private static void addDeprecatedKeys() { new String[] {MRJobConfig.REDUCE_SKIP_MAXGROUPS}); Configuration.addDeprecation("mapred.reduce.child.log.level", new String[] {MRJobConfig.REDUCE_LOG_LEVEL}); + Configuration.addDeprecation("mapreduce.job.counters.limit", + new String[] {MRJobConfig.COUNTERS_MAX_KEY}); Configuration.addDeprecation("jobclient.completion.poll.interval", new String[] {Job.COMPLETION_POLL_INTERVAL_KEY}); Configuration.addDeprecation("jobclient.progress.monitor.poll.interval", From bb64f860eb5abec7c03aa4d59f1aae30a52da2a7 Mon Sep 
17 00:00:00 2001 From: Daryn Sharp Date: Thu, 30 Aug 2012 19:46:14 +0000 Subject: [PATCH 27/62] HDFS-3861. Deadlock in DFSClient (Kihwal Lee via daryn) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379093 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 248c133967..da009a4a30 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -1574,6 +1574,8 @@ Release 0.23.3 - UNRELEASED HDFS-3718. Datanode won't shutdown because of runaway DataBlockScanner thread (Kihwal Lee via daryn) + HDFS-3861. Deadlock in DFSClient (Kihwal Lee via daryn) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 446dfd51a7..97a146432d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -533,7 +533,7 @@ void checkOpen() throws IOException { * until the first output stream is created. The same instance will * be returned until all output streams are closed. */ - public synchronized LeaseRenewer getLeaseRenewer() throws IOException { + public LeaseRenewer getLeaseRenewer() throws IOException { return LeaseRenewer.getInstance(authority, ugi, this); } From 6f6e170325d39f9f7b543a39791b2cb54692f83d Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Thu, 30 Aug 2012 19:58:07 +0000 Subject: [PATCH 28/62] HADOOP-8726. The Secrets in Credentials are not available to MR tasks (daryn and Benoy Antony via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379100 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 + .../apache/hadoop/security/Credentials.java | 6 -- .../hadoop/security/UserGroupInformation.java | 75 +++++++------------ .../hadoop/security/TestCredentials.java | 2 +- .../security/TestUserGroupInformation.java | 73 ++++++++++++++++++ .../org/apache/hadoop/mapred/YarnChild.java | 2 +- .../hadoop/mapreduce/v2/app/MRAppMaster.java | 2 +- .../org/apache/hadoop/mapreduce/TestJob.java | 17 +++-- 8 files changed, 117 insertions(+), 63 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 6bd92f8af6..2b0ba1da2d 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -993,6 +993,9 @@ Release 0.23.3 - UNRELEASED HADOOP-8725. MR is broken when security is off (daryn via bobby) + HADOOP-8726. 
The Secrets in Credentials are not available to MR tasks + (daryn and Benoy Antony via bobby) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java index 6d5b048c8a..a258c7f88c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java @@ -274,10 +274,4 @@ private void addAll(Credentials other, boolean overwrite) { } } } - - public void addTokensToUGI(UserGroupInformation ugi) { - for (Map.Entry> token: tokenMap.entrySet()) { - ugi.addToken(token.getKey(), token.getValue()); - } - } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index 184b40d8ed..64ca98cf28 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -27,7 +27,6 @@ import java.security.PrivilegedAction; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -646,7 +645,7 @@ static UserGroupInformation getLoginUser() throws IOException { // user. Credentials cred = Credentials.readTokenStorageFile( new Path("file:///" + fileLocation), conf); - cred.addTokensToUGI(loginUser); + loginUser.addCredentials(cred); } loginUser.spawnAutoRenewalThreadForUserCreds(); } catch (LoginException le) { @@ -1176,41 +1175,6 @@ public synchronized boolean addTokenIdentifier(TokenIdentifier tokenId) { public synchronized Set getTokenIdentifiers() { return subject.getPublicCredentials(TokenIdentifier.class); } - - // wrapper to retain the creds key for the token - private class NamedToken { - Text alias; - Token token; - NamedToken(Text alias, Token token) { - this.alias = alias; - this.token = token; - } - @Override - public boolean equals(Object o) { - boolean equals; - if (o == this) { - equals = true; - } else if (!(o instanceof NamedToken)) { - equals = false; - } else { - Text otherAlias = ((NamedToken)o).alias; - if (alias == otherAlias) { - equals = true; - } else { - equals = (otherAlias != null && otherAlias.equals(alias)); - } - } - return equals; - } - @Override - public int hashCode() { - return (alias != null) ? alias.hashCode() : -1; - } - @Override - public String toString() { - return "NamedToken: alias="+alias+" token="+token; - } - } /** * Add a token to this UGI @@ -1219,7 +1183,7 @@ public String toString() { * @return true on successful add of new token */ public synchronized boolean addToken(Token token) { - return addToken(token.getService(), token); + return (token != null) ? 
addToken(token.getService(), token) : false; } /** @@ -1231,10 +1195,8 @@ public synchronized boolean addToken(Token token) { */ public synchronized boolean addToken(Text alias, Token token) { - NamedToken namedToken = new NamedToken(alias, token); - Collection ugiCreds = subject.getPrivateCredentials(); - ugiCreds.remove(namedToken); // allow token to be replaced - return ugiCreds.add(new NamedToken(alias, token)); + getCredentialsInternal().addToken(alias, token); + return true; } /** @@ -1244,8 +1206,8 @@ public synchronized boolean addToken(Text alias, */ public synchronized Collection> getTokens() { - return Collections.unmodifiableList( - new ArrayList>(getCredentials().getAllTokens())); + return Collections.unmodifiableCollection( + getCredentialsInternal().getAllTokens()); } /** @@ -1254,11 +1216,26 @@ Collection> getTokens() { * @return Credentials of tokens associated with this user */ public synchronized Credentials getCredentials() { - final Credentials credentials = new Credentials(); - final Set namedTokens = - subject.getPrivateCredentials(NamedToken.class); - for (final NamedToken namedToken : namedTokens) { - credentials.addToken(namedToken.alias, namedToken.token); + return new Credentials(getCredentialsInternal()); + } + + /** + * Add the given Credentials to this user. + * @param credentials of tokens and secrets + */ + public synchronized void addCredentials(Credentials credentials) { + getCredentialsInternal().addAll(credentials); + } + + private synchronized Credentials getCredentialsInternal() { + final Credentials credentials; + final Set credentialsSet = + subject.getPrivateCredentials(Credentials.class); + if (!credentialsSet.isEmpty()){ + credentials = credentialsSet.iterator().next(); + } else { + credentials = new Credentials(); + subject.getPrivateCredentials().add(credentials); } return credentials; } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCredentials.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCredentials.java index 72d02dbc6e..cad0262a92 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCredentials.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCredentials.java @@ -220,7 +220,7 @@ public void testAddTokensToUGI() { for (int i=0; i < service.length; i++) { creds.addToken(service[i], token[i]); } - creds.addTokensToUGI(ugi); + ugi.addCredentials(creds); creds = ugi.getCredentials(); for (int i=0; i < service.length; i++) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java index ce8ee28207..a1bbd984d1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java @@ -250,6 +250,70 @@ public void testAddToken() throws Exception { ugi.addToken(t1); checkTokens(ugi, t1, t2, t3); } + + @SuppressWarnings("unchecked") // from Mockito mocks + @Test + public void testGetCreds() throws Exception { + UserGroupInformation ugi = + UserGroupInformation.createRemoteUser("someone"); + + Text service = new Text("service"); + Token t1 = mock(Token.class); + when(t1.getService()).thenReturn(service); + Token t2 = mock(Token.class); + 
when(t2.getService()).thenReturn(new Text("service2")); + Token t3 = mock(Token.class); + when(t3.getService()).thenReturn(service); + + // add token to ugi + ugi.addToken(t1); + ugi.addToken(t2); + checkTokens(ugi, t1, t2); + + Credentials creds = ugi.getCredentials(); + creds.addToken(t3.getService(), t3); + assertSame(t3, creds.getToken(service)); + // check that ugi wasn't modified + checkTokens(ugi, t1, t2); + } + + @SuppressWarnings("unchecked") // from Mockito mocks + @Test + public void testAddCreds() throws Exception { + UserGroupInformation ugi = + UserGroupInformation.createRemoteUser("someone"); + + Text service = new Text("service"); + Token t1 = mock(Token.class); + when(t1.getService()).thenReturn(service); + Token t2 = mock(Token.class); + when(t2.getService()).thenReturn(new Text("service2")); + byte[] secret = new byte[]{}; + Text secretKey = new Text("sshhh"); + + // fill credentials + Credentials creds = new Credentials(); + creds.addToken(t1.getService(), t1); + creds.addToken(t2.getService(), t2); + creds.addSecretKey(secretKey, secret); + + // add creds to ugi, and check ugi + ugi.addCredentials(creds); + checkTokens(ugi, t1, t2); + assertSame(secret, ugi.getCredentials().getSecretKey(secretKey)); + } + + @SuppressWarnings("unchecked") // from Mockito mocks + @Test + public void testGetCredsNotSame() + throws Exception { + UserGroupInformation ugi = + UserGroupInformation.createRemoteUser("someone"); + Credentials creds = ugi.getCredentials(); + // should always get a new copy + assertNotSame(creds, ugi.getCredentials()); + } + private void checkTokens(UserGroupInformation ugi, Token ... tokens) { // check the ugi's token collection @@ -299,13 +363,22 @@ public void testUGITokens() throws Exception { Token t2 = mock(Token.class); when(t2.getService()).thenReturn(new Text("t2")); + Credentials creds = new Credentials(); + byte[] secretKey = new byte[]{}; + Text secretName = new Text("shhh"); + creds.addSecretKey(secretName, secretKey); + ugi.addToken(t1); ugi.addToken(t2); + ugi.addCredentials(creds); Collection> z = ugi.getTokens(); assertTrue(z.contains(t1)); assertTrue(z.contains(t2)); assertEquals(2, z.size()); + Credentials ugiCreds = ugi.getCredentials(); + assertSame(secretKey, ugiCreds.getSecretKey(secretName)); + assertEquals(1, ugiCreds.numberOfSecretKeys()); try { z.remove(t1); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java index 64ac83e8cf..c05c7aa69d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java @@ -141,7 +141,7 @@ public TaskUmbilicalProtocol run() throws Exception { childUGI = UserGroupInformation.createRemoteUser(System .getenv(ApplicationConstants.Environment.USER.toString())); // Add tokens to new user so that it may execute its task correctly. 
- job.getCredentials().addTokensToUGI(childUGI); + childUGI.addCredentials(credentials); // Create a final reference to the task for the doAs block final Task taskFinal = task; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java index 463a3edec6..d80653767e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java @@ -487,7 +487,7 @@ protected void downloadTokensAndSetupUGI(Configuration conf) { fsTokens.addAll(Credentials.readTokenStorageFile(jobTokenFile, conf)); LOG.info("jobSubmitDir=" + jobSubmitDir + " jobTokenFile=" + jobTokenFile); - fsTokens.addTokensToUGI(currentUser); // For use by AppMaster itself. + currentUser.addCredentials(fsTokens); // For use by AppMaster itself. } } catch (IOException e) { throw new YarnException(e); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java index 6d2f5e6b69..94f49acf97 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java @@ -27,6 +27,7 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapreduce.JobStatus.State; import org.apache.hadoop.mapreduce.protocol.ClientProtocol; +import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.junit.Assert; @@ -55,14 +56,20 @@ public void testJobToString() throws IOException, InterruptedException { @Test public void testUGICredentialsPropogation() throws Exception { + Credentials creds = new Credentials(); Token token = mock(Token.class); - Text service = new Text("service"); - - UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - ugi.addToken(service, token); + Text tokenService = new Text("service"); + Text secretName = new Text("secret"); + byte secret[] = new byte[]{}; + + creds.addToken(tokenService, token); + creds.addSecretKey(secretName, secret); + UserGroupInformation.getLoginUser().addCredentials(creds); JobConf jobConf = new JobConf(); Job job = new Job(jobConf); - assertSame(token, job.getCredentials().getToken(service)); + + assertSame(token, job.getCredentials().getToken(tokenService)); + assertSame(secret, job.getCredentials().getSecretKey(secretName)); } } From 65e447cd8bd57dd46e14502325b63bbe945fc8e7 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Thu, 30 Aug 2012 21:02:55 +0000 Subject: [PATCH 29/62] MAPREDUCE-4569. Fixed TestHsWebServicesJobsQuery to pass on JDK7 by not depending on test order. Contributed by Thomas Graves. 
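On JDK7 test methods are no longer guaranteed to run in declaration order, so the query test below can no longer assume that a hard-coded state such as JobState.KILL_WAIT happens to be unused; it now derives an unused state from the jobs the mock app context actually holds. A generic sketch of that idea (a hypothetical helper, not code from the patch):

    import java.util.Collection;
    import java.util.EnumSet;

    // Sketch: return an enum value that no current job is using, rather than
    // hard-coding one and relying on the order in which earlier tests ran.
    final class UnusedStateFinder {
      static <E extends Enum<E>> E findUnused(Class<E> enumType, Collection<E> inUse) {
        EnumSet<E> unused = EnumSet.allOf(enumType);
        unused.removeAll(inUse);
        if (unused.isEmpty()) {
          throw new IllegalStateException(
              "every " + enumType.getSimpleName() + " value is in use");
        }
        return unused.iterator().next();
      }
    }

In the patch itself the in-use states come from appContext.getAllJobs(), and the state found this way replaces the previously hard-coded KILL_WAIT in the query parameter.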
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379146 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../hs/webapp/TestHsWebServicesJobsQuery.java | 17 ++++++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 72786c396f..5c88ff8217 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -553,6 +553,9 @@ Release 0.23.3 - UNRELEASED MAPREDUCE-4375. Show Configuration Tracability in MR UI (bobby via tgraves) + MAPREDUCE-4569. Fixed TestHsWebServicesJobsQuery to pass on JDK7 by not + depending on test order. (Thomas Graves via vinodkv) + OPTIMIZATIONS MAPREDUCE-3850. Avoid redundant calls for tokens in TokenCache (Daryn diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobsQuery.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobsQuery.java index 5d5da9d551..0b23c95229 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobsQuery.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobsQuery.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.Map; @@ -217,9 +218,23 @@ public TestHsWebServicesJobsQuery() { @Test public void testJobsQueryStateNone() throws JSONException, Exception { WebResource r = resource(); + + ArrayList JOB_STATES = + new ArrayList(Arrays.asList(JobState.values())); + + // find a state that isn't in use + Map jobsMap = appContext.getAllJobs(); + for (Map.Entry entry : jobsMap.entrySet()) { + JOB_STATES.remove(entry.getValue().getState()); + } + + assertTrue("No unused job states", JOB_STATES.size() > 0); + JobState notInUse = JOB_STATES.get(0); + ClientResponse response = r.path("ws").path("v1").path("history") - .path("mapreduce").path("jobs").queryParam("state", JobState.KILL_WAIT.toString()) + .path("mapreduce").path("jobs").queryParam("state", notInUse.toString()) .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); JSONObject json = response.getEntity(JSONObject.class); assertEquals("incorrect number of elements", 1, json.length()); From 34d1e73235455170c8a2e199c36bf45973c88d24 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Thu, 30 Aug 2012 21:40:28 +0000 Subject: [PATCH 30/62] HDFS-3837. Fix DataNode.recoverBlock findbugs warning. Contributed by Eli Collins git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379172 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../hadoop-hdfs/dev-support/findbugsExcludeFile.xml | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index da009a4a30..3b37d41a71 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -696,6 +696,8 @@ Branch-2 ( Unreleased changes ) HDFS-3864. 
NN does not update internal file mtime for OP_CLOSE when reading from the edit log. (atm) + HDFS-3837. Fix DataNode.recoverBlock findbugs warning. (eli) + BREAKDOWN OF HDFS-3042 SUBTASKS HDFS-2185. HDFS portion of ZK-based FailoverController (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml index 3461d6afe4..7c9f3646e2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml @@ -273,4 +273,10 @@ + + + + + + From 7d1c8d92f9b4b83c6ee154cd9ff70724bc61599f Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Fri, 31 Aug 2012 04:08:23 +0000 Subject: [PATCH 31/62] HDFS-3733. Audit logs should include WebHDFS access. Contributed by Andy Isaacson git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379278 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../hdfs/server/namenode/FSNamesystem.java | 85 +++++++----- .../web/resources/NamenodeWebHdfsMethods.java | 19 ++- .../hdfs/server/namenode/NameNodeAdapter.java | 2 +- .../hdfs/server/namenode/TestAuditLogs.java | 127 ++++++++++++++++-- .../hadoop/hdfs/server/namenode/TestFsck.java | 12 +- .../resources/TestWebHdfsDataLocality.java | 1 - 7 files changed, 204 insertions(+), 44 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 3b37d41a71..2cadb27278 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -698,6 +698,8 @@ Branch-2 ( Unreleased changes ) HDFS-3837. Fix DataNode.recoverBlock findbugs warning. (eli) + HDFS-3733. Audit logs should include WebHDFS access. (Andy Isaacson via eli) + BREAKDOWN OF HDFS-3042 SUBTASKS HDFS-2185. 
HDFS portion of ZK-based FailoverController (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 0a5a550b72..23ebffa6b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -169,6 +169,7 @@ import org.apache.hadoop.hdfs.server.namenode.ha.StandbyState; import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean; import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; +import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; @@ -1071,7 +1072,7 @@ void setPermission(String src, FsPermission permission) } catch (AccessControlException e) { if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(false, UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), "setPermission", src, null, null); } throw e; @@ -1100,7 +1101,7 @@ private void setPermissionInt(String src, FsPermission permission) getEditLog().logSync(); if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), "setPermission", src, null, resultingStat); } } @@ -1117,7 +1118,7 @@ void setOwner(String src, String username, String group) } catch (AccessControlException e) { if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(false, UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), "setOwner", src, null, null); } throw e; @@ -1155,7 +1156,7 @@ private void setOwnerInt(String src, String username, String group) getEditLog().logSync(); if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), "setOwner", src, null, resultingStat); } } @@ -1190,7 +1191,7 @@ LocatedBlocks getBlockLocations(String src, long offset, long length, } catch (AccessControlException e) { if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(false, UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), "open", src, null, null); } throw e; @@ -1216,7 +1217,7 @@ private LocatedBlocks getBlockLocationsInt(String src, long offset, long length, offset, length, doAccessTime, needBlockToken); if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), "open", src, null, null); } if (checkSafeMode && isInSafeMode()) { @@ -1301,7 +1302,7 @@ void concat(String target, String [] srcs) } catch (AccessControlException e) { if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(false, UserGroupInformation.getLoginUser(), - Server.getRemoteIp(), + getRemoteIp(), "concat", Arrays.toString(srcs), target, null); } throw e; @@ -1351,7 +1352,7 @@ private void concatInt(String target, String [] srcs) getEditLog().logSync(); if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(UserGroupInformation.getLoginUser(), - Server.getRemoteIp(), + getRemoteIp(), 
"concat", Arrays.toString(srcs), target, resultingStat); } } @@ -1468,7 +1469,7 @@ void setTimes(String src, long mtime, long atime) } catch (AccessControlException e) { if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(false, UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), "setTimes", src, null, null); } throw e; @@ -1495,7 +1496,7 @@ private void setTimesInt(String src, long mtime, long atime) if (auditLog.isInfoEnabled() && isExternalInvocation()) { final HdfsFileStatus stat = dir.getFileInfo(src, false); logAuditEvent(UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), "setTimes", src, null, stat); } } else { @@ -1517,7 +1518,7 @@ void createSymlink(String target, String link, } catch (AccessControlException e) { if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(false, UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), "createSymlink", link, target, null); } throw e; @@ -1545,7 +1546,7 @@ private void createSymlinkInt(String target, String link, getEditLog().logSync(); if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), "createSymlink", link, target, resultingStat); } } @@ -1601,7 +1602,7 @@ boolean setReplication(final String src, final short replication) } catch (AccessControlException e) { if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(false, UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), "setReplication", src, null, null); } throw e; @@ -1637,7 +1638,7 @@ private boolean setReplicationInt(final String src, final short replication) getEditLog().logSync(); if (isFile && auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), "setReplication", src, null, null); } return isFile; @@ -1694,7 +1695,7 @@ void startFile(String src, PermissionStatus permissions, String holder, } catch (AccessControlException e) { if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(false, UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), "create", src, null, null); } throw e; @@ -1719,7 +1720,7 @@ private void startFileInt(String src, PermissionStatus permissions, String holde if (auditLog.isInfoEnabled() && isExternalInvocation()) { final HdfsFileStatus stat = dir.getFileInfo(src, false); logAuditEvent(UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), "create", src, null, stat); } } @@ -2017,7 +2018,7 @@ LocatedBlock appendFile(String src, String holder, String clientMachine) } catch (AccessControlException e) { if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(false, UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), "append", src, null, null); } throw e; @@ -2055,7 +2056,7 @@ private LocatedBlock appendFileInt(String src, String holder, String clientMachi } if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), "append", src, null, null); } return lb; @@ -2521,7 +2522,7 @@ boolean renameTo(String src, String dst) } catch (AccessControlException e) { if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(false, UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), 
"rename", src, dst, null); } throw e; @@ -2550,7 +2551,7 @@ private boolean renameToInt(String src, String dst) getEditLog().logSync(); if (status && auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), "rename", src, dst, resultingStat); } return status; @@ -2610,7 +2611,7 @@ void renameTo(String src, String dst, Options.Rename... options) for (Rename option : options) { cmd.append(option.value()).append(" "); } - logAuditEvent(UserGroupInformation.getCurrentUser(), Server.getRemoteIp(), + logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(), cmd.toString(), src, dst, resultingStat); } } @@ -2648,7 +2649,7 @@ boolean delete(String src, boolean recursive) } catch (AccessControlException e) { if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(false, UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), "delete", src, null, null); } throw e; @@ -2664,7 +2665,7 @@ private boolean deleteInt(String src, boolean recursive) boolean status = deleteInternal(src, recursive, true); if (status && auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), "delete", src, null, null); } return status; @@ -2802,8 +2803,11 @@ private boolean isSafeModeTrackingBlocks() { */ HdfsFileStatus getFileInfo(String src, boolean resolveLink) throws AccessControlException, UnresolvedLinkException, - StandbyException { + StandbyException, IOException { + HdfsFileStatus stat = null; + readLock(); + try { checkOperation(OperationCategory.READ); @@ -2813,10 +2817,23 @@ HdfsFileStatus getFileInfo(String src, boolean resolveLink) if (isPermissionEnabled) { checkTraverse(src); } - return dir.getFileInfo(src, resolveLink); + stat = dir.getFileInfo(src, resolveLink); + } catch (AccessControlException e) { + if (auditLog.isInfoEnabled() && isExternalInvocation()) { + logAuditEvent(false, UserGroupInformation.getCurrentUser(), + getRemoteIp(), + "getfileinfo", src, null, null); + } + throw e; } finally { readUnlock(); } + if (auditLog.isInfoEnabled() && isExternalInvocation()) { + logAuditEvent(UserGroupInformation.getCurrentUser(), + getRemoteIp(), + "getfileinfo", src, null, null); + } + return stat; } /** @@ -2829,7 +2846,7 @@ boolean mkdirs(String src, PermissionStatus permissions, } catch (AccessControlException e) { if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(false, UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), "mkdirs", src, null, null); } throw e; @@ -2854,7 +2871,7 @@ private boolean mkdirsInt(String src, PermissionStatus permissions, if (status && auditLog.isInfoEnabled() && isExternalInvocation()) { final HdfsFileStatus stat = dir.getFileInfo(src, false); logAuditEvent(UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), "mkdirs", src, null, stat); } return status; @@ -3295,7 +3312,7 @@ DirectoryListing getListing(String src, byte[] startAfter, } catch (AccessControlException e) { if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(false, UserGroupInformation.getCurrentUser(), - Server.getRemoteIp(), + getRemoteIp(), "listStatus", src, null, null); } throw e; @@ -3319,7 +3336,7 @@ private DirectoryListing getListingInt(String src, byte[] startAfter, } if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(UserGroupInformation.getCurrentUser(), - 
Server.getRemoteIp(), + getRemoteIp(), "listStatus", src, null, null); } dl = dir.getListing(src, startAfter, needLocation); @@ -5250,7 +5267,15 @@ private AuthenticationMethod getConnectionAuthenticationMethod() * RPC call context even if the client exits. */ private boolean isExternalInvocation() { - return Server.isRpcInvocation(); + return Server.isRpcInvocation() || NamenodeWebHdfsMethods.isWebHdfsInvocation(); + } + + private static InetAddress getRemoteIp() { + InetAddress ip = Server.getRemoteIp(); + if (ip != null) { + return ip; + } + return NamenodeWebHdfsMethods.getRemoteIp(); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java index 386ff89eeb..0ee13519f6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.io.OutputStream; import java.io.PrintStream; +import java.net.InetAddress; import java.net.URI; import java.net.URISyntaxException; import java.security.PrivilegedExceptionAction; @@ -92,6 +93,7 @@ import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam; import org.apache.hadoop.hdfs.web.resources.UriFsPathParam; import org.apache.hadoop.hdfs.web.resources.UserParam; +import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.NodeBase; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.SecurityUtil; @@ -116,9 +118,20 @@ public static String getRemoteAddress() { return REMOTE_ADDRESS.get(); } - /** Set the remote client address. */ - static void setRemoteAddress(String remoteAddress) { - REMOTE_ADDRESS.set(remoteAddress); + public static InetAddress getRemoteIp() { + try { + return InetAddress.getByName(getRemoteAddress()); + } catch (Exception e) { + return null; + } + } + + /** + * Returns true if a WebHdfs request is in progress. Akin to + * {@link Server#isRpcInvocation()}. 
+ */ + public static boolean isWebHdfsInvocation() { + return getRemoteAddress() != null; } private @Context ServletContext context; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java index c8248fa3b7..ada2e2a405 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java @@ -61,7 +61,7 @@ public static LocatedBlocks getBlockLocations(NameNode namenode, public static HdfsFileStatus getFileInfo(NameNode namenode, String src, boolean resolveLink) throws AccessControlException, UnresolvedLinkException, - StandbyException { + StandbyException, IOException { return namenode.getNamesystem().getFileInfo(src, resolveLink); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java index 59b6cc21c8..eb31ef9710 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java @@ -32,13 +32,17 @@ import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.HftpFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.web.WebHdfsTestUtil; +import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.log4j.Level; @@ -83,6 +87,7 @@ public void setupCluster() throws Exception { final long precision = 1L; conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L); + conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true); util = new DFSTestUtil.Builder().setName("TestAuditAllowed"). 
setNumFiles(20).build(); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build(); @@ -115,6 +120,18 @@ public void testAuditAllowed() throws Exception { assertTrue("failed to read from file", val > 0); } + /** test that allowed stat puts proper entry in audit log */ + @Test + public void testAuditAllowedStat() throws Exception { + final Path file = new Path(fnames[0]); + FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf); + + setupAuditLogs(); + FileStatus st = userfs.getFileStatus(file); + verifyAuditLogs(true); + assertTrue("failed to stat file", st != null && st.isFile()); + } + /** test that denied operation puts proper entry in audit log */ @Test public void testAuditDenied() throws Exception { @@ -135,6 +152,85 @@ public void testAuditDenied() throws Exception { verifyAuditLogs(false); } + /** test that access via webhdfs puts proper entry in audit log */ + @Test + public void testAuditWebHdfs() throws Exception { + final Path file = new Path(fnames[0]); + + fs.setPermission(file, new FsPermission((short)0644)); + fs.setOwner(file, "root", null); + + setupAuditLogs(); + + WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf); + InputStream istream = webfs.open(file); + int val = istream.read(); + istream.close(); + + verifyAuditLogsRepeat(true, 3); + assertTrue("failed to read from file", val > 0); + } + + /** test that stat via webhdfs puts proper entry in audit log */ + @Test + public void testAuditWebHdfsStat() throws Exception { + final Path file = new Path(fnames[0]); + + fs.setPermission(file, new FsPermission((short)0644)); + fs.setOwner(file, "root", null); + + setupAuditLogs(); + + WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf); + FileStatus st = webfs.getFileStatus(file); + + verifyAuditLogs(true); + assertTrue("failed to stat file", st != null && st.isFile()); + } + + /** test that access via Hftp puts proper entry in audit log */ + @Test + public void testAuditHftp() throws Exception { + final Path file = new Path(fnames[0]); + + final String hftpUri = + "hftp://" + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY); + + HftpFileSystem hftpFs = null; + + setupAuditLogs(); + try { + hftpFs = (HftpFileSystem) new Path(hftpUri).getFileSystem(conf); + InputStream istream = hftpFs.open(file); + int val = istream.read(); + istream.close(); + + verifyAuditLogs(true); + } finally { + if (hftpFs != null) hftpFs.close(); + } + } + + /** test that denied access via webhdfs puts proper entry in audit log */ + @Test + public void testAuditWebHdfsDenied() throws Exception { + final Path file = new Path(fnames[0]); + + fs.setPermission(file, new FsPermission((short)0600)); + fs.setOwner(file, "root", null); + + setupAuditLogs(); + try { + WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf); + InputStream istream = webfs.open(file); + int val = istream.read(); + fail("open+read must not succeed, got " + val); + } catch(AccessControlException E) { + System.out.println("got access denied, as expected."); + } + verifyAuditLogsRepeat(false, 2); + } + /** Sets up log4j logger for auditlogs */ private void setupAuditLogs() throws IOException { File file = new File(auditLogFile); @@ -148,19 +244,34 @@ private void setupAuditLogs() throws IOException { logger.addAppender(appender); } + // Ensure audit log has only one entry private void verifyAuditLogs(boolean expectSuccess) throws IOException { + verifyAuditLogsRepeat(expectSuccess, 1); + } + + // Ensure audit 
log has exactly N entries + private void verifyAuditLogsRepeat(boolean expectSuccess, int ndupe) + throws IOException { // Turn off the logs Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger(); logger.setLevel(Level.OFF); - // Ensure audit log has only one entry BufferedReader reader = new BufferedReader(new FileReader(auditLogFile)); - String line = reader.readLine(); - assertNotNull(line); - assertTrue("Expected audit event not found in audit log", - auditPattern.matcher(line).matches()); - assertTrue("Expected success=" + expectSuccess, - successPattern.matcher(line).matches() == expectSuccess); - assertNull("Unexpected event in audit log", reader.readLine()); + String line = null; + boolean ret = true; + + try { + for (int i = 0; i < ndupe; i++) { + line = reader.readLine(); + assertNotNull(line); + assertTrue("Expected audit event not found in audit log", + auditPattern.matcher(line).matches()); + ret &= successPattern.matcher(line).matches(); + } + assertNull("Unexpected event in audit log", reader.readLine()); + assertTrue("Expected success=" + expectSuccess, ret == expectSuccess); + } finally { + reader.close(); + } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index b79904518e..85d7125c53 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -95,6 +95,12 @@ public class TestFsck { "ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" + "cmd=fsck\\ssrc=\\/\\sdst=null\\s" + "perm=null"); + static final Pattern getfileinfoPattern = Pattern.compile( + "allowed=.*?\\s" + + "ugi=.*?\\s" + + "ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" + + "cmd=getfileinfo\\ssrc=\\/\\sdst=null\\s" + + "perm=null"); static final Pattern numCorruptBlocksPattern = Pattern.compile( ".*Corrupt blocks:\t\t([0123456789]*).*"); @@ -180,10 +186,14 @@ private void verifyAuditLogs() throws IOException { Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger(); logger.setLevel(Level.OFF); - // Ensure audit log has only one for FSCK + // Audit log should contain one getfileinfo and one fsck BufferedReader reader = new BufferedReader(new FileReader(auditLogFile)); String line = reader.readLine(); assertNotNull(line); + assertTrue("Expected getfileinfo event not found in audit log", + getfileinfoPattern.matcher(line).matches()); + line = reader.readLine(); + assertNotNull(line); assertTrue("Expected fsck event not found in audit log", fsckPattern.matcher(line).matches()); assertNull("Unexpected event in audit log", reader.readLine()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java index 74373be6e5..fb8d529284 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java @@ -89,7 +89,6 @@ public void testDataLocality() throws Exception { //set client address to a particular datanode final DataNode dn = cluster.getDataNodes().get(i); 
final String ipAddr = dm.getDatanode(dn.getDatanodeId()).getIpAddr(); - NamenodeWebHdfsMethods.setRemoteAddress(ipAddr); //The chosen datanode must be the same as the client address final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode( From 4cdc854452f6fbc1ce04990e7f19474d0d5fe913 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Fri, 31 Aug 2012 04:10:46 +0000 Subject: [PATCH 32/62] HADOOP-8722. Update BUILDING.txt with latest snappy info. Contributed by Colin Patrick McCabe git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379280 13f79535-47bb-0310-9956-ffa450edef68 --- BUILDING.txt | 24 +++++++++++++++++-- .../hadoop-common/CHANGES.txt | 3 +++ 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/BUILDING.txt b/BUILDING.txt index b0a2740d66..9d6eb08765 100644 --- a/BUILDING.txt +++ b/BUILDING.txt @@ -54,12 +54,32 @@ Maven build goals: Build options: * Use -Pnative to compile/bundle native code - * Use -Dsnappy.prefix=(/usr/local) & -Dbundle.snappy=(false) to compile - Snappy JNI bindings and to bundle Snappy SO files * Use -Pdocs to generate & bundle the documentation in the distribution (using -Pdist) * Use -Psrc to create a project source TAR.GZ * Use -Dtar to create a TAR with the distribution (using -Pdist) + Snappy build options: + + Snappy is a compression library that can be utilized by the native code. + It is currently an optional component, meaning that Hadoop can be built with + or without this dependency. + + * Use -Drequire.snappy to fail the build if libsnappy.so is not found. + If this option is not specified and the snappy library is missing, + we silently build a version of libhadoop.so that cannot make use of snappy. + This option is recommended if you plan on making use of snappy and want + to get more repeatable builds. + + * Use -Dsnappy.prefix to specify a nonstandard location for the libsnappy + header files and library files. You do not need this option if you have + installed snappy using a package manager. + * Use -Dsnappy.lib to specify a nonstandard location for the libsnappy library + files. Similarly to snappy.prefix, you do not need this option if you have + installed snappy using a package manager. + * Use -Dbundle.snappy to copy the contents of the snappy.lib directory into + the final tar file. This option requires that -Dsnappy.lib is also given, + and it ignores the -Dsnappy.prefix option. + Tests options: * Use -DskipTests to skip tests when running the following Maven goals: diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 2b0ba1da2d..83065d89f7 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -455,6 +455,9 @@ Branch-2 ( Unreleased changes ) HADOOP-8747. Syntax error on cmake version 2.6 patch 2 in JNIFlags.cmake. (cmccabe via tucu) + HADOOP-8722. Update BUILDING.txt with latest snappy info. + (Colin Patrick McCabe via eli) + BREAKDOWN OF HDFS-3042 SUBTASKS HADOOP-8220. ZKFailoverController doesn't handle failure to become active From d8ce3bee5813f6baa2017b6fb1ccd53de1546cf1 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Fri, 31 Aug 2012 16:33:21 +0000 Subject: [PATCH 33/62] YARN-63. 
RMNodeImpl is missing valid transitions from the UNHEALTHY state (Jason Lowe via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379498 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 4 ++ .../resourcemanager/rmnode/RMNodeImpl.java | 9 +++ .../TestRMNodeTransitions.java | 68 +++++++++++++++++++ 3 files changed, 81 insertions(+) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 50538dc6a1..bea4421f70 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -70,3 +70,7 @@ Release 0.23.3 - Unreleased YARN-31. Fix TestDelegationTokenRenewer to not depend on test order so as to pass tests on jdk7. (Thomas Graves via vinodkv) + + YARN-63. RMNodeImpl is missing valid transitions from the UNHEALTHY state + (Jason Lowe via bobby) + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java index 184a981dbb..7cdfa8d992 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java @@ -144,6 +144,15 @@ RMNodeEventType.RECONNECTED, new ReconnectNodeTransition()) .addTransition(NodeState.UNHEALTHY, EnumSet.of(NodeState.UNHEALTHY, NodeState.RUNNING), RMNodeEventType.STATUS_UPDATE, new StatusUpdateWhenUnHealthyTransition()) + .addTransition(NodeState.UNHEALTHY, NodeState.DECOMMISSIONED, + RMNodeEventType.DECOMMISSION, + new DeactivateNodeTransition(NodeState.DECOMMISSIONED)) + .addTransition(NodeState.UNHEALTHY, NodeState.LOST, + RMNodeEventType.EXPIRE, + new DeactivateNodeTransition(NodeState.LOST)) + .addTransition(NodeState.UNHEALTHY, NodeState.REBOOTED, + RMNodeEventType.REBOOTING, + new DeactivateNodeTransition(NodeState.REBOOTED)) .addTransition(NodeState.UNHEALTHY, NodeState.UNHEALTHY, RMNodeEventType.RECONNECTED, new ReconnectNodeTransition()) .addTransition(NodeState.UNHEALTHY, NodeState.UNHEALTHY, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java index bee7a39579..0c22aa00dc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java @@ -23,6 +23,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; +import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -32,10 +33,12 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.NodeHealthStatus; import org.apache.hadoop.yarn.api.records.NodeId; 
+import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse; import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore; import org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.InlineDispatcher; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType; @@ -148,4 +151,69 @@ public void testExpiredContainer() { Assert.assertEquals(0, completedContainers.size()); } + @Test + public void testRunningExpire() { + RMNodeImpl node = getRunningNode(); + node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.EXPIRE)); + Assert.assertEquals(NodeState.LOST, node.getState()); + } + + @Test + public void testUnhealthyExpire() { + RMNodeImpl node = getUnhealthyNode(); + node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.EXPIRE)); + Assert.assertEquals(NodeState.LOST, node.getState()); + } + + @Test + public void testRunningDecommission() { + RMNodeImpl node = getRunningNode(); + node.handle(new RMNodeEvent(node.getNodeID(), + RMNodeEventType.DECOMMISSION)); + Assert.assertEquals(NodeState.DECOMMISSIONED, node.getState()); + } + + @Test + public void testUnhealthyDecommission() { + RMNodeImpl node = getUnhealthyNode(); + node.handle(new RMNodeEvent(node.getNodeID(), + RMNodeEventType.DECOMMISSION)); + Assert.assertEquals(NodeState.DECOMMISSIONED, node.getState()); + } + + @Test + public void testRunningRebooting() { + RMNodeImpl node = getRunningNode(); + node.handle(new RMNodeEvent(node.getNodeID(), + RMNodeEventType.REBOOTING)); + Assert.assertEquals(NodeState.REBOOTED, node.getState()); + } + + @Test + public void testUnhealthyRebooting() { + RMNodeImpl node = getUnhealthyNode(); + node.handle(new RMNodeEvent(node.getNodeID(), + RMNodeEventType.REBOOTING)); + Assert.assertEquals(NodeState.REBOOTED, node.getState()); + } + + private RMNodeImpl getRunningNode() { + NodeId nodeId = BuilderUtils.newNodeId("localhost", 0); + RMNodeImpl node = new RMNodeImpl(nodeId, rmContext,null, 0, 0, + null, null, null); + node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.STARTED)); + Assert.assertEquals(NodeState.RUNNING, node.getState()); + return node; + } + + private RMNodeImpl getUnhealthyNode() { + RMNodeImpl node = getRunningNode(); + NodeHealthStatus status = node.getNodeHealthStatus(); + status.setHealthReport("sick"); + status.setIsNodeHealthy(false); + node.handle(new RMNodeStatusEvent(node.getNodeID(), status, + new ArrayList(), null, null, null)); + Assert.assertEquals(NodeState.UNHEALTHY, node.getState()); + return node; + } } From e5a9d672f0068dc953430678b52dbc18874921b4 Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Fri, 31 Aug 2012 17:01:25 +0000 Subject: [PATCH 34/62] HADOOP-8684. Deadlock between WritableComparator and WritableComparable. 
Contributed by Jing Zhao git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379506 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../org/apache/hadoop/io/WritableComparator.java | 16 +++++++--------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 83065d89f7..6ae9e03a5d 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -197,6 +197,9 @@ Trunk (unreleased changes) HADOOP-8623. hadoop jar command should respect HADOOP_OPTS. (Steven Willis via suresh) + HADOOP-8684. Deadlock between WritableComparator and WritableComparable. + (Jing Zhao via suresh) + OPTIMIZATIONS HADOOP-7761. Improve the performance of raw comparisons. (todd) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java index 9d4087f1cd..d2cbe3bd62 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java @@ -18,8 +18,9 @@ package org.apache.hadoop.io; -import java.io.*; -import java.util.*; +import java.io.DataInput; +import java.io.IOException; +import java.util.concurrent.ConcurrentHashMap; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -38,12 +39,11 @@ @InterfaceStability.Stable public class WritableComparator implements RawComparator { - private static HashMap comparators = - new HashMap(); // registry + private static final ConcurrentHashMap comparators + = new ConcurrentHashMap(); // registry /** Get a comparator for a {@link WritableComparable} implementation. */ - public static synchronized - WritableComparator get(Class c) { + public static WritableComparator get(Class c) { WritableComparator comparator = comparators.get(c); if (comparator == null) { // force the static initializers to run @@ -76,12 +76,10 @@ private static void forceInit(Class cls) { /** Register an optimized comparator for a {@link WritableComparable} * implementation. Comparators registered with this method must be * thread-safe. */ - public static synchronized void define(Class c, - WritableComparator comparator) { + public static void define(Class c, WritableComparator comparator) { comparators.put(c, comparator); } - private final Class keyClass; private final WritableComparable key1; private final WritableComparable key2; From 2fd49f5a57067f96a84e4a80ff12184f5709f97a Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Fri, 31 Aug 2012 17:43:35 +0000 Subject: [PATCH 35/62] HDFS-3833. TestDFSShell fails on windows due to concurrent file read/write. Contributed by Brandon Li git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379525 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java | 5 +++++ 2 files changed, 8 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 2cadb27278..3694fe51fa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -716,6 +716,9 @@ Branch-2 ( Unreleased changes ) HDFS-3432. 
TestDFSZKFailoverController tries to fail over too early (todd) + HDFS-3833. TestDFSShell fails on windows due to concurrent file + read/write. (Brandon Li via suresh) + Release 2.0.0-alpha - 05-23-2012 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java index 4387fb75a8..1ebef6db51 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java @@ -1284,6 +1284,11 @@ public Object run() throws Exception { public void testGet() throws IOException { DFSTestUtil.setLogLevel2All(FSInputChecker.LOG); final Configuration conf = new HdfsConfiguration(); + // Race can happen here: block scanner is reading the file when test tries + // to corrupt the test file, which will fail the test on Windows platform. + // Disable block scanner to avoid this race. + conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem(); From 38d003a6db26307cd6544e1ca303c5a521299fb4 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Fri, 31 Aug 2012 18:27:23 +0000 Subject: [PATCH 36/62] HADOOP-8727. Gracefully deprecate dfs.umaskmode in 2.x onwards (Harsh J via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379537 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../java/org/apache/hadoop/conf/Configuration.java | 2 ++ .../hadoop-common/src/main/resources/core-default.xml | 10 ++++++++++ 3 files changed, 15 insertions(+) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 6ae9e03a5d..09a989c485 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -1002,6 +1002,9 @@ Release 0.23.3 - UNRELEASED HADOOP-8726. The Secrets in Credentials are not available to MR tasks (daryn and Benoy Antony via bobby) + HADOOP-8727. 
Gracefully deprecate dfs.umaskmode in 2.x onwards (Harsh J + via bobby) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java index f1cb41dd6d..55d53da13a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -2324,6 +2324,8 @@ private static void addDeprecatedKeys() { new String[]{CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY}); Configuration.addDeprecation("fs.default.name", new String[]{CommonConfigurationKeys.FS_DEFAULT_NAME_KEY}); + Configuration.addDeprecation("dfs.umaskmode", + new String[]{CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY}); } /** diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index 8866739b3c..b360e1f875 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -1096,5 +1096,15 @@ + + fs.permissions.umask-mode + 022 + + The umask used when creating files and directories. + Can be in octal or in symbolic. Examples are: + "022" (octal for u=rwx,g=r-x,o=r-x in symbolic), + or "u=rwx,g=rwx,o=" (symbolic for 007 in octal). + + From 45a8e8c5a46535287de97fd6609c0743eef888ee Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Fri, 31 Aug 2012 19:11:05 +0000 Subject: [PATCH 37/62] YARN-60. Fixed a bug in ResourceManager which causes all NMs to get NPEs and thus causes all containers to be rejected. Contributed by Vinod Kumar Vavilapalli. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379550 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 2 + .../protocolrecords/NodeHeartbeatRequest.java | 11 +- .../impl/pb/NodeHeartbeatRequestPBImpl.java | 43 ++++++- .../yarn_server_common_service_protos.proto | 1 + .../nodemanager/NodeStatusUpdaterImpl.java | 21 ++- .../NMContainerTokenSecretManager.java | 1 - .../nodemanager/TestNodeStatusUpdater.java | 2 +- .../resourcemanager/ResourceManager.java | 7 +- .../ResourceTrackerService.java | 18 +-- .../server/resourcemanager/rmnode/RMNode.java | 3 - .../resourcemanager/rmnode/RMNodeImpl.java | 20 +-- .../rmnode/RMNodeStatusEvent.java | 9 +- .../RMContainerTokenSecretManager.java | 7 +- .../yarn/server/resourcemanager/MockNM.java | 27 +++- .../server/resourcemanager/MockNodes.java | 8 +- .../TestRMNodeTransitions.java | 2 +- .../webapp/TestRMWebServicesNodes.java | 5 +- .../hadoop-yarn-server-tests/pom.xml | 6 + .../server/TestContainerManagerSecurity.java | 18 +-- .../yarn/server/TestRMNMSecretKeys.java | 120 ++++++++++++++++++ 20 files changed, 242 insertions(+), 89 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestRMNMSecretKeys.java diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index bea4421f70..8284215a98 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -74,3 +74,5 @@ Release 0.23.3 - Unreleased YARN-63. RMNodeImpl is missing valid transitions from the UNHEALTHY state (Jason Lowe via bobby) + YARN-60. 
Fixed a bug in ResourceManager which causes all NMs to get NPEs and + thus causes all containers to be rejected. (vinodkv) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java index 55403d11f4..9e69680d87 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java @@ -18,11 +18,14 @@ package org.apache.hadoop.yarn.server.api.protocolrecords; +import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.api.records.NodeStatus; - public interface NodeHeartbeatRequest { - public abstract NodeStatus getNodeStatus(); - - public abstract void setNodeStatus(NodeStatus status); + + NodeStatus getNodeStatus(); + void setNodeStatus(NodeStatus status); + + MasterKey getLastKnownMasterKey(); + void setLastKnownMasterKey(MasterKey secretKey); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java index 00687981cd..8fcf7f2c14 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java @@ -18,24 +18,25 @@ package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb; - import org.apache.hadoop.yarn.api.records.ProtoBase; +import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto; import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProto; import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProto; import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProtoOrBuilder; import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest; +import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.api.records.NodeStatus; +import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl; import org.apache.hadoop.yarn.server.api.records.impl.pb.NodeStatusPBImpl; - - -public class NodeHeartbeatRequestPBImpl extends ProtoBase implements NodeHeartbeatRequest { +public class NodeHeartbeatRequestPBImpl extends + ProtoBase implements NodeHeartbeatRequest { NodeHeartbeatRequestProto proto = NodeHeartbeatRequestProto.getDefaultInstance(); NodeHeartbeatRequestProto.Builder builder = null; boolean viaProto = false; private NodeStatus nodeStatus = null; - + private MasterKey lastKnownMasterKey = null; public NodeHeartbeatRequestPBImpl() { builder = NodeHeartbeatRequestProto.newBuilder(); @@ -57,6 +58,10 @@ private void 
mergeLocalToBuilder() { if (this.nodeStatus != null) { builder.setNodeStatus(convertToProtoFormat(this.nodeStatus)); } + if (this.lastKnownMasterKey != null) { + builder + .setLastKnownMasterKey(convertToProtoFormat(this.lastKnownMasterKey)); + } } private void mergeLocalToProto() { @@ -96,6 +101,27 @@ public void setNodeStatus(NodeStatus nodeStatus) { this.nodeStatus = nodeStatus; } + @Override + public MasterKey getLastKnownMasterKey() { + NodeHeartbeatRequestProtoOrBuilder p = viaProto ? proto : builder; + if (this.lastKnownMasterKey != null) { + return this.lastKnownMasterKey; + } + if (!p.hasLastKnownMasterKey()) { + return null; + } + this.lastKnownMasterKey = convertFromProtoFormat(p.getLastKnownMasterKey()); + return this.lastKnownMasterKey; + } + + @Override + public void setLastKnownMasterKey(MasterKey masterKey) { + maybeInitBuilder(); + if (masterKey == null) + builder.clearLastKnownMasterKey(); + this.lastKnownMasterKey = masterKey; + } + private NodeStatusPBImpl convertFromProtoFormat(NodeStatusProto p) { return new NodeStatusPBImpl(p); } @@ -104,6 +130,11 @@ private NodeStatusProto convertToProtoFormat(NodeStatus t) { return ((NodeStatusPBImpl)t).getProto(); } + private MasterKeyPBImpl convertFromProtoFormat(MasterKeyProto p) { + return new MasterKeyPBImpl(p); + } - + private MasterKeyProto convertToProtoFormat(MasterKey t) { + return ((MasterKeyPBImpl)t).getProto(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto index 5eb16f6dbb..e4d82c75d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto @@ -35,6 +35,7 @@ message RegisterNodeManagerResponseProto { message NodeHeartbeatRequestProto { optional NodeStatusProto node_status = 1; + optional MasterKeyProto last_known_master_key = 2; } message NodeHeartbeatResponseProto { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java index 6954a69491..819e22d214 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java @@ -31,6 +31,7 @@ import org.apache.avro.AvroRuntimeException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.YarnException; @@ -111,10 +112,7 @@ public synchronized void init(Configuration conf) { this.totalResource = recordFactory.newRecordInstance(Resource.class); this.totalResource.setMemory(memoryMb); metrics.addResource(totalResource); - 
this.tokenKeepAliveEnabled = - conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, - YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED) - && isSecurityEnabled(); + this.tokenKeepAliveEnabled = isTokenKeepAliveEnabled(conf); this.tokenRemovalDelayMs = conf.getInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, YarnConfiguration.DEFAULT_RM_NM_EXPIRY_INTERVAL_MS); @@ -163,10 +161,17 @@ synchronized boolean hasToRebootNode() { return this.hasToRebootNode; } - protected boolean isSecurityEnabled() { + private boolean isSecurityEnabled() { return UserGroupInformation.isSecurityEnabled(); } + @Private + protected boolean isTokenKeepAliveEnabled(Configuration conf) { + return conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, + YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED) + && isSecurityEnabled(); + } + protected ResourceTracker getRMClient() { Configuration conf = getConfig(); YarnRPC rpc = YarnRPC.create(conf); @@ -321,7 +326,11 @@ public void run() { NodeHeartbeatRequest request = recordFactory .newRecordInstance(NodeHeartbeatRequest.class); - request.setNodeStatus(nodeStatus); + request.setNodeStatus(nodeStatus); + if (isSecurityEnabled()) { + request.setLastKnownMasterKey(NodeStatusUpdaterImpl.this.context + .getContainerTokenSecretManager().getCurrentKey()); + } HeartbeatResponse response = resourceTracker.nodeHeartbeat(request).getHeartbeatResponse(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java index 29253eaf7a..bc70f26a07 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java @@ -92,7 +92,6 @@ public synchronized byte[] retrievePassword( containerId.getApplicationAttemptId().getApplicationId(); MasterKeyData masterKeyToUse = null; - if (this.previousMasterKey != null && keyId == this.previousMasterKey.getMasterKey().getKeyId()) { // A container-launch has come in with a token generated off the last diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index a2f569b382..41d171f97c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -261,7 +261,7 @@ protected ResourceTracker getRMClient() { } @Override - protected boolean isSecurityEnabled() { + protected boolean isTokenKeepAliveEnabled(Configuration conf) { return true; } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index e7dfa4d1c4..e9e5340b80 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -159,7 +159,7 @@ public synchronized void init(Configuration conf) { DelegationTokenRenewer tokenRenewer = createDelegationTokenRenewer(); addService(tokenRenewer); - this.containerTokenSecretManager = new RMContainerTokenSecretManager(conf); + this.containerTokenSecretManager = createContainerTokenSecretManager(conf); this.rmContext = new RMContextImpl(this.store, this.rmDispatcher, @@ -231,6 +231,11 @@ public synchronized void init(Configuration conf) { super.init(conf); } + protected RMContainerTokenSecretManager createContainerTokenSecretManager( + Configuration conf) { + return new RMContainerTokenSecretManager(conf); + } + protected EventHandler createSchedulerEventDispatcher() { return new SchedulerEventDispatcher(this.scheduler); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java index 93d073079f..ed4a021b0d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java @@ -169,14 +169,14 @@ public RegisterNodeManagerResponse registerNodeManager( return response; } - MasterKey nextMasterKeyForNode = null; if (isSecurityEnabled()) { - nextMasterKeyForNode = this.containerTokenSecretManager.getCurrentKey(); + MasterKey nextMasterKeyForNode = + this.containerTokenSecretManager.getCurrentKey(); regResponse.setMasterKey(nextMasterKeyForNode); } RMNode rmNode = new RMNodeImpl(nodeId, rmContext, host, cmPort, httpPort, - resolve(host), capability, nextMasterKeyForNode); + resolve(host), capability); RMNode oldNode = this.rmContext.getRMNodes().putIfAbsent(nodeId, rmNode); if (oldNode == null) { @@ -266,17 +266,18 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) latestResponse.addAllApplicationsToCleanup(rmNode.getAppsToCleanup()); latestResponse.setNodeAction(NodeAction.NORMAL); - MasterKey nextMasterKeyForNode = null; - // Check if node's masterKey needs to be updated and if the currentKey has // roller over, send it across if (isSecurityEnabled()) { + boolean shouldSendMasterKey = false; - MasterKey nodeKnownMasterKey = rmNode.getCurrentMasterKey(); - nextMasterKeyForNode = this.containerTokenSecretManager.getNextKey(); + + MasterKey nextMasterKeyForNode = + this.containerTokenSecretManager.getNextKey(); if (nextMasterKeyForNode != null) { // 
nextMasterKeyForNode can be null if there is no outstanding key that // is in the activation period. + MasterKey nodeKnownMasterKey = request.getLastKnownMasterKey(); if (nodeKnownMasterKey.getKeyId() != nextMasterKeyForNode.getKeyId()) { shouldSendMasterKey = true; } @@ -290,8 +291,7 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) this.rmContext.getDispatcher().getEventHandler().handle( new RMNodeStatusEvent(nodeId, remoteNodeStatus.getNodeHealthStatus(), remoteNodeStatus.getContainersStatuses(), - remoteNodeStatus.getKeepAliveApplications(), latestResponse, - nextMasterKeyForNode)); + remoteNodeStatus.getKeepAliveApplications(), latestResponse)); nodeHeartBeatResponse.setHeartbeatResponse(latestResponse); return nodeHeartBeatResponse; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java index d642422508..aafa3dbdef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java @@ -28,7 +28,6 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse; -import org.apache.hadoop.yarn.server.api.records.MasterKey; /** * Node managers information on available resources @@ -107,6 +106,4 @@ public interface RMNode { public List getAppsToCleanup(); public HeartbeatResponse getLastHeartBeatResponse(); - - public MasterKey getCurrentMasterKey(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java index 7cdfa8d992..83833b9bdb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java @@ -46,7 +46,6 @@ import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse; -import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics; import org.apache.hadoop.yarn.server.resourcemanager.NodesListManagerEvent; import org.apache.hadoop.yarn.server.resourcemanager.NodesListManagerEventType; @@ -105,8 +104,6 @@ public class RMNodeImpl implements RMNode, EventHandler { private HeartbeatResponse latestHeartBeatResponse = recordFactory .newRecordInstance(HeartbeatResponse.class); - private MasterKey currentMasterKey; - private static final StateMachineFactory stateMachine; public RMNodeImpl(NodeId nodeId, RMContext context, 
String hostName, - int cmPort, int httpPort, Node node, Resource capability, - MasterKey masterKey) { + int cmPort, int httpPort, Node node, Resource capability) { this.nodeId = nodeId; this.context = context; this.hostName = hostName; @@ -178,7 +174,6 @@ public RMNodeImpl(NodeId nodeId, RMContext context, String hostName, this.nodeAddress = hostName + ":" + cmPort; this.httpAddress = hostName + ":" + httpPort; this.node = node; - this.currentMasterKey = masterKey; this.nodeHealthStatus.setIsNodeHealthy(true); this.nodeHealthStatus.setHealthReport("Healthy"); this.nodeHealthStatus.setLastHealthReportTime(System.currentTimeMillis()); @@ -312,17 +307,6 @@ public HeartbeatResponse getLastHeartBeatResponse() { this.readLock.unlock(); } } - - @Override - public MasterKey getCurrentMasterKey() { - this.readLock.lock(); - try { - return this.currentMasterKey; - } finally { - this.readLock.unlock(); - } - } - public void handle(RMNodeEvent event) { LOG.debug("Processing " + event.getNodeId() + " of type " + event.getType()); @@ -500,7 +484,6 @@ public NodeState transition(RMNodeImpl rmNode, RMNodeEvent event) { // Switch the last heartbeatresponse. rmNode.latestHeartBeatResponse = statusEvent.getLatestResponse(); - rmNode.currentMasterKey = statusEvent.getCurrentMasterKey(); NodeHealthStatus remoteNodeHealthStatus = statusEvent.getNodeHealthStatus(); @@ -582,7 +565,6 @@ public NodeState transition(RMNodeImpl rmNode, RMNodeEvent event) { // Switch the last heartbeatresponse. rmNode.latestHeartBeatResponse = statusEvent.getLatestResponse(); - rmNode.currentMasterKey = statusEvent.getCurrentMasterKey(); NodeHealthStatus remoteNodeHealthStatus = statusEvent.getNodeHealthStatus(); rmNode.setNodeHealthStatus(remoteNodeHealthStatus); if (remoteNodeHealthStatus.getIsNodeHealthy()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java index 36c877d9e0..1285c2bed9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java @@ -25,7 +25,6 @@ import org.apache.hadoop.yarn.api.records.NodeHealthStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse; -import org.apache.hadoop.yarn.server.api.records.MasterKey; public class RMNodeStatusEvent extends RMNodeEvent { @@ -33,17 +32,15 @@ public class RMNodeStatusEvent extends RMNodeEvent { private final List containersCollection; private final HeartbeatResponse latestResponse; private final List keepAliveAppIds; - private final MasterKey currentMasterKey; public RMNodeStatusEvent(NodeId nodeId, NodeHealthStatus nodeHealthStatus, List collection, List keepAliveAppIds, - HeartbeatResponse latestResponse, MasterKey currentMasterKey) { + HeartbeatResponse latestResponse) { super(nodeId, RMNodeEventType.STATUS_UPDATE); this.nodeHealthStatus = nodeHealthStatus; this.containersCollection = collection; this.keepAliveAppIds = keepAliveAppIds; this.latestResponse = latestResponse; - 
this.currentMasterKey = currentMasterKey; } public NodeHealthStatus getNodeHealthStatus() { @@ -61,8 +58,4 @@ public HeartbeatResponse getLatestResponse() { public List getKeepAliveAppIds() { return this.keepAliveAppIds; } - - public MasterKey getCurrentMasterKey() { - return this.currentMasterKey; - } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java index 467b4ad5e7..cc4ccd7e1e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java @@ -89,7 +89,7 @@ public void stop() { * Creates a new master-key and sets it as the primary. */ @Private - protected void rollMasterKey() { + public void rollMasterKey() { super.writeLock.lock(); try { LOG.info("Rolling master-key for container-tokens"); @@ -97,6 +97,9 @@ protected void rollMasterKey() { this.currentMasterKey = createNewMasterKey(); } else { this.nextMasterKey = createNewMasterKey(); + LOG.info("Going to activate master-key with key-id " + + this.nextMasterKey.getMasterKey().getKeyId() + " in " + + this.activationDelay + "ms"); this.timer.schedule(new NextKeyActivator(), this.activationDelay); } } finally { @@ -122,7 +125,7 @@ public MasterKey getNextKey() { * Activate the new master-key */ @Private - protected void activateNextMasterKey() { + public void activateNextMasterKey() { super.writeLock.lock(); try { LOG.info("Activating next master key with id: " diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java index 3e5742a0da..ba999bfb2e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java @@ -35,7 +35,9 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest; import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse; +import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.api.records.NodeStatus; +import org.apache.hadoop.yarn.server.api.records.RegistrationResponse; import org.apache.hadoop.yarn.util.BuilderUtils; import org.apache.hadoop.yarn.util.Records; @@ -46,8 +48,9 @@ public class MockNM { private final int memory; private final ResourceTrackerService resourceTracker; private final int httpPort = 2; + private MasterKey currentMasterKey; - MockNM(String nodeIdStr, int memory, ResourceTrackerService resourceTracker) { + 
public MockNM(String nodeIdStr, int memory, ResourceTrackerService resourceTracker) { this.memory = memory; this.resourceTracker = resourceTracker; String[] splits = nodeIdStr.split(":"); @@ -72,7 +75,7 @@ public void containerStatus(Container container) throws Exception { nodeHeartbeat(conts, true); } - public NodeId registerNode() throws Exception { + public RegistrationResponse registerNode() throws Exception { RegisterNodeManagerRequest req = Records.newRecord( RegisterNodeManagerRequest.class); req.setNodeId(nodeId); @@ -80,13 +83,15 @@ public NodeId registerNode() throws Exception { Resource resource = Records.newRecord(Resource.class); resource.setMemory(memory); req.setResource(resource); - resourceTracker.registerNodeManager(req); - return nodeId; + RegistrationResponse registrationResponse = + resourceTracker.registerNodeManager(req).getRegistrationResponse(); + this.currentMasterKey = registrationResponse.getMasterKey(); + return registrationResponse; } - public HeartbeatResponse nodeHeartbeat(boolean b) throws Exception { + public HeartbeatResponse nodeHeartbeat(boolean isHealthy) throws Exception { return nodeHeartbeat(new HashMap>(), - b, ++responseId); + isHealthy, ++responseId); } public HeartbeatResponse nodeHeartbeat(ApplicationAttemptId attemptId, @@ -123,7 +128,15 @@ public HeartbeatResponse nodeHeartbeat(Map getAppsToCleanup() { public HeartbeatResponse getLastHeartBeatResponse() { return null; } - - @Override - public MasterKey getCurrentMasterKey() { - return null; - } }; private static RMNode buildRMNode(int rack, final Resource perNode, NodeState state, String httpAddr) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java index 0c22aa00dc..2b2decccb6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java @@ -105,7 +105,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { new TestSchedulerEventDispatcher()); NodeId nodeId = BuilderUtils.newNodeId("localhost", 0); - node = new RMNodeImpl(nodeId, rmContext, null, 0, 0, null, null, null); + node = new RMNodeImpl(nodeId, rmContext, null, 0, 0, null, null); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java index 14fc685c5b..084dcffe4c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java @@ -54,6 +54,7 @@ import org.w3c.dom.Element; import 
org.w3c.dom.NodeList; import org.xml.sax.InputSource; + import com.google.inject.Guice; import com.google.inject.Injector; import com.google.inject.servlet.GuiceServletContextListener; @@ -145,7 +146,7 @@ public void testNodesDefaultWithUnHealthyNode() throws JSONException, nodeHealth.setHealthReport("test health report"); nodeHealth.setIsNodeHealthy(false); node.handle(new RMNodeStatusEvent(nm3.getNodeId(), nodeHealth, - new ArrayList(), null, null, null)); + new ArrayList(), null, null)); rm.NMwaitForState(nm3.getNodeId(), NodeState.UNHEALTHY); ClientResponse response = @@ -360,7 +361,7 @@ public void testNodesQueryHealthyAndState() throws JSONException, Exception { nodeHealth.setHealthReport("test health report"); nodeHealth.setIsNodeHealthy(false); node.handle(new RMNodeStatusEvent(nm1.getNodeId(), nodeHealth, - new ArrayList(), null, null, null)); + new ArrayList(), null, null)); rm.NMwaitForState(nm1.getNodeId(), NodeState.UNHEALTHY); ClientResponse response = r.path("ws").path("v1").path("cluster") diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml index 06e9fccbe6..600c647f9f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml @@ -44,6 +44,12 @@ org.apache.hadoop hadoop-yarn-server-resourcemanager + + org.apache.hadoop + hadoop-yarn-server-resourcemanager + test-jar + test + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java index 527d4ff40f..1c7933ae27 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java @@ -46,7 +46,6 @@ import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.SecurityUtil; @@ -222,7 +221,7 @@ public void testMaliceUser() throws IOException, InterruptedException { Resource modifiedResource = BuilderUtils.newResource(2048); ContainerTokenIdentifier modifiedIdentifier = new ContainerTokenIdentifier( dummyIdentifier.getContainerID(), dummyIdentifier.getNmHostAddress(), - modifiedResource, Long.MAX_VALUE, 0); + modifiedResource, Long.MAX_VALUE, dummyIdentifier.getMasterKeyId()); Token modifiedToken = new Token( modifiedIdentifier.getBytes(), containerToken.getPassword().array(), new Text(containerToken.getKind()), new Text(containerToken @@ -250,19 +249,14 @@ public Void run() { + "it will indicate RPC success"); } catch (Exception e) { Assert.assertEquals( - java.lang.reflect.UndeclaredThrowableException.class - .getCanonicalName(), e.getClass().getCanonicalName()); - Assert.assertEquals(RemoteException.class.getCanonicalName(), e - .getCause().getClass().getCanonicalName()); - Assert.assertEquals( - 
"org.apache.hadoop.security.token.SecretManager$InvalidToken", - ((RemoteException) e.getCause()).getClassName()); + java.lang.reflect.UndeclaredThrowableException.class + .getCanonicalName(), e.getClass().getCanonicalName()); Assert.assertTrue(e .getCause() .getMessage() - .matches( - "Given Container container_\\d*_\\d*_\\d\\d_\\d*" - + " seems to have an illegally generated token.")); + .contains( + "DIGEST-MD5: digest response format violation. " + + "Mismatched response.")); } return null; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestRMNMSecretKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestRMNMSecretKeys.java new file mode 100644 index 0000000000..9b6024ce3c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestRMNMSecretKeys.java @@ -0,0 +1,120 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.server; + +import java.io.IOException; + +import junit.framework.Assert; + +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.event.Dispatcher; +import org.apache.hadoop.yarn.event.DrainDispatcher; +import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse; +import org.apache.hadoop.yarn.server.api.records.MasterKey; +import org.apache.hadoop.yarn.server.api.records.RegistrationResponse; +import org.apache.hadoop.yarn.server.resourcemanager.MockNM; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; +import org.junit.Test; + +public class TestRMNMSecretKeys { + + @Test + public void testNMUpdation() throws Exception { + YarnConfiguration conf = new YarnConfiguration(); + conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, + "kerberos"); + UserGroupInformation.setConfiguration(conf); + // Default rolling and activation intervals are large enough, no need to + // intervene + + final DrainDispatcher dispatcher = new DrainDispatcher(); + ResourceManager rm = new ResourceManager(null) { + @Override + protected void doSecureLogin() throws IOException { + // Do nothing. 
+ } + + @Override + protected Dispatcher createDispatcher() { + return dispatcher; + } + }; + rm.init(conf); + rm.start(); + + MockNM nm = new MockNM("host:1234", 3072, rm.getResourceTrackerService()); + RegistrationResponse registrationResponse = nm.registerNode(); + MasterKey masterKey = registrationResponse.getMasterKey(); + Assert.assertNotNull("Registration should cause a key-update!", masterKey); + dispatcher.await(); + + HeartbeatResponse response = nm.nodeHeartbeat(true); + Assert.assertNull( + "First heartbeat after registration shouldn't get any key updates!", + response.getMasterKey()); + dispatcher.await(); + + response = nm.nodeHeartbeat(true); + Assert + .assertNull( + "Even second heartbeat after registration shouldn't get any key updates!", + response.getMasterKey()); + dispatcher.await(); + + // Let's force a roll-over + RMContainerTokenSecretManager secretManager = + rm.getRMContainerTokenSecretManager(); + secretManager.rollMasterKey(); + + // Heartbeats after roll-over and before activation should be fine. + response = nm.nodeHeartbeat(true); + Assert.assertNotNull( + "Heartbeats after roll-over and before activation should not err out.", + response.getMasterKey()); + Assert.assertEquals( + "Roll-over should have incremented the key-id only by one!", + masterKey.getKeyId() + 1, response.getMasterKey().getKeyId()); + dispatcher.await(); + + response = nm.nodeHeartbeat(true); + Assert.assertNull( + "Second heartbeat after roll-over shouldn't get any key updates!", + response.getMasterKey()); + dispatcher.await(); + + // Let's force activation + secretManager.activateNextMasterKey(); + + response = nm.nodeHeartbeat(true); + Assert.assertNull("Activation shouldn't cause any key updates!", + response.getMasterKey()); + dispatcher.await(); + + response = nm.nodeHeartbeat(true); + Assert.assertNull( + "Even second heartbeat after activation shouldn't get any key updates!", + response.getMasterKey()); + dispatcher.await(); + + rm.stop(); + } +} From 27cfde7f4dd6e72796ae17400ed5b05081e49dff Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Fri, 31 Aug 2012 19:24:31 +0000 Subject: [PATCH 38/62] YARN-60: Fix minor merge conflict (bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379555 13f79535-47bb-0310-9956-ffa450edef68 --- .../yarn/server/resourcemanager/TestRMNodeTransitions.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java index 2b2decccb6..f9ba80cb97 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java @@ -200,7 +200,7 @@ public void testUnhealthyRebooting() { private RMNodeImpl getRunningNode() { NodeId nodeId = BuilderUtils.newNodeId("localhost", 0); RMNodeImpl node = new RMNodeImpl(nodeId, rmContext,null, 0, 0, - null, null, null); + null, null); node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.STARTED)); Assert.assertEquals(NodeState.RUNNING, 
node.getState()); return node; @@ -212,7 +212,7 @@ private RMNodeImpl getUnhealthyNode() { status.setHealthReport("sick"); status.setIsNodeHealthy(false); node.handle(new RMNodeStatusEvent(node.getNodeID(), status, - new ArrayList(), null, null, null)); + new ArrayList(), null, null)); Assert.assertEquals(NodeState.UNHEALTHY, node.getState()); return node; } From 60b8c6e1e184d3d1d1e3a1b4e4df1aabdb6a6f51 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Fri, 31 Aug 2012 19:57:34 +0000 Subject: [PATCH 39/62] YARN-66. aggregated logs permissions not set properly (tgraves via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379565 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 2 ++ .../yarn/logaggregation/AggregatedLogFormat.java | 12 +++++++++++- .../yarn/logaggregation/TestAggregatedLogFormat.java | 8 ++++++++ 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 8284215a98..6b0c12f116 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -76,3 +76,5 @@ Release 0.23.3 - Unreleased YARN-60. Fixed a bug in ResourceManager which causes all NMs to get NPEs and thus causes all containers to be rejected. (vinodkv) + + YARN-66. aggregated logs permissions not set properly (tgraves via bobby) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java index 407fc9ca26..008324f013 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java @@ -48,6 +48,7 @@ import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.file.tfile.TFile; import org.apache.hadoop.security.UserGroupInformation; @@ -68,6 +69,13 @@ public class AggregatedLogFormat { //Maybe write out a list of containerLogs skipped by the retention policy. private static final int VERSION = 1; + /** + * Umask for the log file. 
+ */ + private static final FsPermission APP_LOG_FILE_UMASK = FsPermission + .createImmutable((short) (0640 ^ 0777)); + + static { RESERVED_KEYS = new HashMap(); RESERVED_KEYS.put(APPLICATION_ACL_KEY.toString(), APPLICATION_ACL_KEY); @@ -194,7 +202,9 @@ public LogWriter(final Configuration conf, final Path remoteAppLogFile, userUgi.doAs(new PrivilegedExceptionAction() { @Override public FSDataOutputStream run() throws Exception { - return FileContext.getFileContext(conf).create( + FileContext fc = FileContext.getFileContext(conf); + fc.setUMask(APP_LOG_FILE_UMASK); + return fc.create( remoteAppLogFile, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), new Options.CreateOpts[] {}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java index ea8c8f79c5..de755a7215 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java @@ -32,7 +32,9 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -100,6 +102,11 @@ public void testReadAcontainerLogs1() throws Exception { logWriter.append(logKey, logValue); logWriter.closeWriter(); + // make sure permission are correct on the file + FileStatus fsStatus = fs.getFileStatus(remoteAppLogFile); + Assert.assertEquals("permissions on log aggregation file are wrong", + FsPermission.createImmutable((short) 0640), fsStatus.getPermission()); + LogReader logReader = new LogReader(conf, remoteAppLogFile); LogKey rLogKey = new LogKey(); DataInputStream dis = logReader.next(rLogKey); @@ -123,6 +130,7 @@ public void testReadAcontainerLogs1() throws Exception { Assert.assertEquals(expectedLength, s.length()); } + private void writeSrcFile(Path srcFilePath, String fileName, long length) throws IOException { From 015256524c0fdcf0b8ede33e0f620cb2f0fb6064 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Fri, 31 Aug 2012 20:25:27 +0000 Subject: [PATCH 40/62] MAPREDUCE-4612. job summary file permissions not set when its created (tgraves via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379584 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 5c88ff8217..d403233ab0 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -323,6 +323,9 @@ Release 2.1.0-alpha - Unreleased MAPREDUCE-4380. Empty Userlogs directory is getting created under logs directory (Devaraj K via bobby) + MAPREDUCE-4612. 
job summary file permissions not set when its created + (tgraves via bobby) + Release 2.0.0-alpha - 05-23-2012 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java index 01073cb34f..d94f074af9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java @@ -661,6 +661,8 @@ protected void closeEventWriter(JobId jobId) throws IOException { summaryFileOut = doneDirFS.create(qualifiedSummaryDoneFile, true); summaryFileOut.writeUTF(mi.getJobSummary().getJobSummaryString()); summaryFileOut.close(); + doneDirFS.setPermission(qualifiedSummaryDoneFile, new FsPermission( + JobHistoryUtils.HISTORY_INTERMEDIATE_FILE_PERMISSIONS)); } catch (IOException e) { LOG.info("Unable to write out JobSummaryInfo to [" + qualifiedSummaryDoneFile + "]", e); @@ -894,7 +896,7 @@ private void moveToDoneNow(Path fromPath, Path toPath) throws IOException { stagingDirFS.delete(fromPath, false); } - } + } boolean pathExists(FileSystem fileSys, Path path) throws IOException { return fileSys.exists(path); From 63f941d2ad71bf528c5f565ec206a49571c59169 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Fri, 31 Aug 2012 20:37:41 +0000 Subject: [PATCH 41/62] MAPREDUCE-4614. Simplify debugging a job's tokens (daryn via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379595 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 8 +++++--- .../main/java/org/apache/hadoop/mapred/YarnChild.java | 6 +++++- .../java/org/apache/hadoop/mapreduce/JobSubmitter.java | 10 +++------- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index d403233ab0..e4593d0e9d 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -323,9 +323,6 @@ Release 2.1.0-alpha - Unreleased MAPREDUCE-4380. Empty Userlogs directory is getting created under logs directory (Devaraj K via bobby) - MAPREDUCE-4612. job summary file permissions not set when its created - (tgraves via bobby) - Release 2.0.0-alpha - 05-23-2012 INCOMPATIBLE CHANGES @@ -856,6 +853,11 @@ Release 0.23.3 - UNRELEASED MAPREDUCE-4600. TestTokenCache.java from MRV1 no longer compiles (daryn via bobby) + MAPREDUCE-4612. job summary file permissions not set when its created + (tgraves via bobby) + + MAPREDUCE-4614. 
Simplify debugging a job's tokens (daryn via bobby) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java index c05c7aa69d..6413bb1a2f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java @@ -93,7 +93,11 @@ public static void main(String[] args) throws Throwable { // Security framework already loaded the tokens into current ugi Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials(); - + LOG.info("Executing with tokens:"); + for (Token token: credentials.getAllTokens()) { + LOG.info(token); + } + // Create TaskUmbilicalProtocol as actual task owner. UserGroupInformation taskOwner = UserGroupInformation.createRemoteUser(firstTaskid.getJobID().toString()); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java index 31081b332f..23be73a752 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java @@ -435,13 +435,9 @@ private void writeConf(Configuration conf, Path jobFile) private void printTokens(JobID jobId, Credentials credentials) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("Printing tokens for job: " + jobId); - for(Token token: credentials.getAllTokens()) { - if (token.getKind().toString().equals("HDFS_DELEGATION_TOKEN")) { - LOG.debug("Submitting with " + token); - } - } + LOG.info("Submitting tokens for job: " + jobId); + for (Token token: credentials.getAllTokens()) { + LOG.info(token); } } From 25e96e455b3473387df865fbc1c3ad7ebf9ff1e4 Mon Sep 17 00:00:00 2001 From: Thomas Graves Date: Fri, 31 Aug 2012 20:43:46 +0000 Subject: [PATCH 42/62] MAPREDUCE-4611. MR AM dies badly when Node is decommissioned (Robert Evans via tgraves) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379599 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 + .../jobhistory/JobHistoryEventHandler.java | 14 +-- .../hadoop/mapreduce/v2/app/MRAppMaster.java | 38 ++++++- .../mapreduce/v2/app/rm/RMCommunicator.java | 14 ++- .../TestJobHistoryEventHandler.java | 2 +- .../mapreduce/v2/app/TestStagingCleanup.java | 106 ++++++++++++++---- 6 files changed, 143 insertions(+), 34 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index e4593d0e9d..8327384561 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -858,6 +858,9 @@ Release 0.23.3 - UNRELEASED MAPREDUCE-4614. Simplify debugging a job's tokens (daryn via bobby) + MAPREDUCE-4611. 
MR AM dies badly when Node is decommissioned (Robert + Evans via tgraves) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java index d94f074af9..7ba3ccd03a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java @@ -99,8 +99,8 @@ public class JobHistoryEventHandler extends AbstractService protected static final Map fileMap = Collections.synchronizedMap(new HashMap()); - // Has a signal (SIGTERM etc) been issued? - protected volatile boolean isSignalled = false; + // should job completion be force when the AM shuts down? + protected volatile boolean forceJobCompletion = false; public JobHistoryEventHandler(AppContext context, int startCount) { super("JobHistoryEventHandler"); @@ -322,7 +322,7 @@ public void stop() { // Process JobUnsuccessfulCompletionEvent for jobIds which still haven't // closed their event writers Iterator jobIt = fileMap.keySet().iterator(); - if(isSignalled) { + if(forceJobCompletion) { while (jobIt.hasNext()) { JobId toClose = jobIt.next(); MetaInfo mi = fileMap.get(toClose); @@ -911,9 +911,9 @@ private String getFileNameFromTmpFN(String tmpFileName) { return tmpFileName.substring(0, tmpFileName.length()-4); } - public void setSignalled(boolean isSignalled) { - this.isSignalled = isSignalled; - LOG.info("JobHistoryEventHandler notified that isSignalled was " - + isSignalled); + public void setForcejobCompletion(boolean forceJobCompletion) { + this.forceJobCompletion = forceJobCompletion; + LOG.info("JobHistoryEventHandler notified that forceJobCompletion is " + + forceJobCompletion); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java index d80653767e..64d8bb8225 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java @@ -170,6 +170,8 @@ public class MRAppMaster extends CompositeService { private Credentials fsTokens = new Credentials(); // Filled during init private UserGroupInformation currentUser; // Will be setup during init + private volatile boolean isLastAMRetry = false; + public MRAppMaster(ApplicationAttemptId applicationAttemptId, ContainerId containerId, String nmHost, int nmPort, int nmHttpPort, long appSubmitTime) { @@ -195,11 +197,21 @@ public MRAppMaster(ApplicationAttemptId applicationAttemptId, @Override public void init(final Configuration conf) { - conf.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true); downloadTokensAndSetupUGI(conf); - + + //TODO this is a hack, we really need the RM to inform us when we + // are the last one. 
This would allow us to configure retries on + // a per application basis. + int numAMRetries = conf.getInt(YarnConfiguration.RM_AM_MAX_RETRIES, + YarnConfiguration.DEFAULT_RM_AM_MAX_RETRIES); + isLastAMRetry = appAttemptID.getAttemptId() >= numAMRetries; + LOG.info("AM Retries: " + numAMRetries + + " attempt num: " + appAttemptID.getAttemptId() + + " is last retry: " + isLastAMRetry); + + context = new RunningAppContext(conf); // Job name is the same as the app name util we support DAG of jobs @@ -417,6 +429,8 @@ public void handle(JobFinishEvent event) { } try { + //We are finishing cleanly so this is the last retry + isLastAMRetry = true; // Stop all services // This will also send the final report to the ResourceManager LOG.info("Calling stop for all the services"); @@ -666,7 +680,11 @@ public void handle(ContainerAllocatorEvent event) { } public void setSignalled(boolean isSignalled) { - ((RMCommunicator) containerAllocator).setSignalled(true); + ((RMCommunicator) containerAllocator).setSignalled(isSignalled); + } + + public void setShouldUnregister(boolean shouldUnregister) { + ((RMCommunicator) containerAllocator).setShouldUnregister(shouldUnregister); } } @@ -717,7 +735,12 @@ private final class StagingDirCleaningService extends AbstractService { @Override public synchronized void stop() { try { - cleanupStagingDir(); + if(isLastAMRetry) { + cleanupStagingDir(); + } else { + LOG.info("Skipping cleaning up the staging dir. " + + "assuming AM will be retried."); + } } catch (IOException io) { LOG.error("Failed to cleanup staging dir: ", io); } @@ -1016,14 +1039,19 @@ static class MRAppMasterShutdownHook implements Runnable { public void run() { LOG.info("MRAppMaster received a signal. Signaling RMCommunicator and " + "JobHistoryEventHandler."); + // Notify the JHEH and RMCommunicator that a SIGTERM has been received so // that they don't take too long in shutting down if(appMaster.containerAllocator instanceof ContainerAllocatorRouter) { ((ContainerAllocatorRouter) appMaster.containerAllocator) .setSignalled(true); + ((ContainerAllocatorRouter) appMaster.containerAllocator) + .setShouldUnregister(appMaster.isLastAMRetry); } + if(appMaster.jobHistoryEventHandler != null) { - appMaster.jobHistoryEventHandler.setSignalled(true); + appMaster.jobHistoryEventHandler + .setForcejobCompletion(appMaster.isLastAMRetry); } appMaster.stop(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java index b0471e68ca..e587ba852e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java @@ -84,6 +84,7 @@ public abstract class RMCommunicator extends AbstractService { private Job job; // Has a signal (SIGTERM etc) been issued? 
protected volatile boolean isSignalled = false; + private volatile boolean shouldUnregister = true; public RMCommunicator(ClientService clientService, AppContext context) { super("RMCommunicator"); @@ -213,7 +214,9 @@ public void stop() { } catch (InterruptedException ie) { LOG.warn("InterruptedException while stopping", ie); } - unregister(); + if(shouldUnregister) { + unregister(); + } super.stop(); } @@ -288,8 +291,15 @@ public AMRMProtocol run() { protected abstract void heartbeat() throws Exception; + public void setShouldUnregister(boolean shouldUnregister) { + this.shouldUnregister = shouldUnregister; + LOG.info("RMCommunicator notified that shouldUnregistered is: " + + shouldUnregister); + } + public void setSignalled(boolean isSignalled) { this.isSignalled = isSignalled; - LOG.info("RMCommunicator notified that iSignalled was : " + isSignalled); + LOG.info("RMCommunicator notified that iSignalled is: " + + isSignalled); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java index c1c227064e..4f86f91432 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java @@ -330,7 +330,7 @@ public void testSigTermedFunctionality() throws IOException { Mockito.when(jobId.getAppId()).thenReturn(mockAppId); jheh.addToFileMap(jobId); - jheh.setSignalled(true); + jheh.setForcejobCompletion(true); for(int i=0; i < numEvents; ++i) { events[i] = getEventToEnqueue(jobId); jheh.handle(events[i]); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java index 063fcfa2cf..67c9cf5b53 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java @@ -23,6 +23,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.times; import java.io.IOException; @@ -47,6 +48,7 @@ import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; @@ -89,28 +91,94 @@ public void testDeletionofStaging() throws IOException { handler.handle(new JobFinishEvent(jobid)); verify(fs).delete(stagingJobPath, true); } + + @Test + public void testDeletionofStagingOnKill() throws IOException { + conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, 
stagingJobDir); + conf.setInt(YarnConfiguration.RM_AM_MAX_RETRIES, 4); + fs = mock(FileSystem.class); + when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true); + ApplicationAttemptId attemptId = recordFactory.newRecordInstance( + ApplicationAttemptId.class); + attemptId.setAttemptId(0); + ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); + appId.setClusterTimestamp(System.currentTimeMillis()); + appId.setId(0); + attemptId.setApplicationId(appId); + JobId jobid = recordFactory.newRecordInstance(JobId.class); + jobid.setAppId(appId); + ContainerAllocator mockAlloc = mock(ContainerAllocator.class); + MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc); + appMaster.init(conf); + //simulate the process being killed + MRAppMaster.MRAppMasterShutdownHook hook = + new MRAppMaster.MRAppMasterShutdownHook(appMaster); + hook.run(); + verify(fs, times(0)).delete(stagingJobPath, true); + } + + @Test + public void testDeletionofStagingOnKillLastTry() throws IOException { + conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir); + conf.setInt(YarnConfiguration.RM_AM_MAX_RETRIES, 1); + fs = mock(FileSystem.class); + when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true); + ApplicationAttemptId attemptId = recordFactory.newRecordInstance( + ApplicationAttemptId.class); + attemptId.setAttemptId(1); + ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); + appId.setClusterTimestamp(System.currentTimeMillis()); + appId.setId(0); + attemptId.setApplicationId(appId); + JobId jobid = recordFactory.newRecordInstance(JobId.class); + jobid.setAppId(appId); + ContainerAllocator mockAlloc = mock(ContainerAllocator.class); + MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc); + appMaster.init(conf); + //simulate the process being killed + MRAppMaster.MRAppMasterShutdownHook hook = + new MRAppMaster.MRAppMasterShutdownHook(appMaster); + hook.run(); + verify(fs).delete(stagingJobPath, true); + } private class TestMRApp extends MRAppMaster { + ContainerAllocator allocator; - public TestMRApp(ApplicationAttemptId applicationAttemptId) { - super(applicationAttemptId, BuilderUtils.newContainerId( - applicationAttemptId, 1), "testhost", 2222, 3333, System - .currentTimeMillis()); - } - - @Override - protected FileSystem getFileSystem(Configuration conf) { - return fs; - } - - @Override - protected void sysexit() { - } - - @Override - public Configuration getConfig() { - return conf; - } + public TestMRApp(ApplicationAttemptId applicationAttemptId, + ContainerAllocator allocator) { + super(applicationAttemptId, BuilderUtils.newContainerId( + applicationAttemptId, 1), "testhost", 2222, 3333, System + .currentTimeMillis()); + this.allocator = allocator; + } + + public TestMRApp(ApplicationAttemptId applicationAttemptId) { + this(applicationAttemptId, null); + } + + @Override + protected FileSystem getFileSystem(Configuration conf) { + return fs; + } + + @Override + protected ContainerAllocator createContainerAllocator( + final ClientService clientService, final AppContext context) { + if(allocator == null) { + return super.createContainerAllocator(clientService, context); + } + return allocator; + } + + @Override + protected void sysexit() { + } + + @Override + public Configuration getConfig() { + return conf; + } } private final class MRAppTestCleanup extends MRApp { From 3819c964fad12ad80b0628c1e083e032df495e27 Mon Sep 17 00:00:00 2001 From: Daryn Sharp Date: Fri, 31 Aug 2012 21:24:48 +0000 Subject: [PATCH 43/62] HDFS-3873. 
Hftp assumes security is disabled if token fetch fails (daryn) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379615 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../apache/hadoop/hdfs/HftpFileSystem.java | 14 ++--- .../hadoop/hdfs/TestHftpDelegationToken.java | 55 +++++++++++++++++-- 3 files changed, 59 insertions(+), 12 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 3694fe51fa..1dcc6a2e65 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -1583,6 +1583,8 @@ Release 0.23.3 - UNRELEASED HDFS-3861. Deadlock in DFSClient (Kihwal Lee via daryn) + HDFS-3873. Hftp assumes security is disabled if token fetch fails (daryn) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java index 8c73e2a6be..5c53644592 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java @@ -21,6 +21,7 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; +import java.net.ConnectException; import java.net.HttpURLConnection; import java.net.InetSocketAddress; import java.net.URI; @@ -247,14 +248,13 @@ public Token run() throws IOException { Credentials c; try { c = DelegationTokenFetcher.getDTfromRemote(nnHttpUrl, renewer); - } catch (Exception e) { - LOG.info("Couldn't get a delegation token from " + nnHttpUrl + - " using http."); - if(LOG.isDebugEnabled()) { - LOG.debug("error was ", e); + } catch (IOException e) { + if (e.getCause() instanceof ConnectException) { + LOG.warn("Couldn't connect to " + nnHttpUrl + + ", assuming security is disabled"); + return null; } - //Maybe the server is in unsecure mode (that's bad but okay) - return null; + throw e; } for (Token t : c.getAllTokens()) { if(LOG.isDebugEnabled()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java index e7df010213..cd8cf0d3f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java @@ -19,13 +19,11 @@ package org.apache.hadoop.hdfs; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertSame; - +import static org.junit.Assert.*; import java.io.IOException; import java.lang.reflect.Field; +import java.net.ServerSocket; +import java.net.Socket; import java.net.URI; import java.security.PrivilegedExceptionAction; @@ -138,6 +136,53 @@ public void testSelectHsftpDelegationToken() throws Exception { conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 5); } + + @Test + public void testInsecureRemoteCluster() throws Exception { + final ServerSocket socket = new ServerSocket(0); // just reserve a port + socket.close(); + Configuration conf = new Configuration(); + URI fsUri = 
URI.create("hsftp://localhost:"+socket.getLocalPort()); + assertNull(FileSystem.newInstance(fsUri, conf).getDelegationToken(null)); + } + + @Test + public void testSecureClusterError() throws Exception { + final ServerSocket socket = new ServerSocket(0); + Thread t = new Thread() { + @Override + public void run() { + while (true) { // fetching does a few retries + try { + Socket s = socket.accept(); + s.getOutputStream().write(1234); + s.shutdownOutput(); + } catch (Exception e) { + break; + } + } + } + }; + t.start(); + + try { + Configuration conf = new Configuration(); + URI fsUri = URI.create("hsftp://localhost:"+socket.getLocalPort()); + Exception ex = null; + try { + FileSystem.newInstance(fsUri, conf).getDelegationToken(null); + } catch (Exception e) { + ex = e; + } + assertNotNull(ex); + assertNotNull(ex.getCause()); + assertEquals("Unexpected end of file from server", + ex.getCause().getMessage()); + } finally { + t.interrupt(); + } + } + private void checkTokenSelection(HftpFileSystem fs, int port, Configuration conf) throws IOException { From 94129df35400882c7783eadfa1f854af4fd57066 Mon Sep 17 00:00:00 2001 From: Jonathan Turner Eagles Date: Fri, 31 Aug 2012 21:25:25 +0000 Subject: [PATCH 44/62] MAPREDUCE-4604. In mapred-default, mapreduce.map.maxattempts & mapreduce.reduce.maxattempts defaults are set to 4 as well as mapreduce.job.maxtaskfailures.per.tracker. (Ravi Prakash via jeagles) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379617 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 4 ++++ .../java/org/apache/hadoop/mapred/JobConf.java | 2 +- .../src/main/resources/mapred-default.xml | 7 +++++-- .../java/org/apache/hadoop/conf/TestJobConf.java | 16 ++++++++++++++++ .../src/java/mapred-default.xml | 2 +- 5 files changed, 27 insertions(+), 4 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 8327384561..501970f098 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -861,6 +861,10 @@ Release 0.23.3 - UNRELEASED MAPREDUCE-4611. MR AM dies badly when Node is decommissioned (Robert Evans via tgraves) + MAPREDUCE-4604. In mapred-default, mapreduce.map.maxattempts & + mapreduce.reduce.maxattempts defaults are set to 4 as well as + mapreduce.job.maxtaskfailures.per.tracker. (Ravi Prakash via jeagles) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java index fde88dbe09..1304755e3c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java @@ -1357,7 +1357,7 @@ public void setMaxTaskFailuresPerTracker(int noFailures) { * @return the maximum no. of failures of a given job per tasktracker. 
*/ public int getMaxTaskFailuresPerTracker() { - return getInt(JobContext.MAX_TASK_FAILURES_PER_TRACKER, 4); + return getInt(JobContext.MAX_TASK_FAILURES_PER_TRACKER, 3); } /** diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml index b2b1f061c8..817cee8a42 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml @@ -797,9 +797,12 @@ mapreduce.job.maxtaskfailures.per.tracker - 4 + 3 The number of task-failures on a tasktracker of a given job - after which new tasks of that job aren't assigned to it. + after which new tasks of that job aren't assigned to it. It + MUST be less than mapreduce.map.maxattempts and + mapreduce.reduce.maxattempts otherwise the failed task will + never be tried on a different node. diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestJobConf.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestJobConf.java index bd3ca5b67c..73f039d636 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestJobConf.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestJobConf.java @@ -21,6 +21,7 @@ import org.junit.Test; import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.JobContext; import org.apache.hadoop.mapreduce.MRJobConfig; public class TestJobConf { @@ -185,4 +186,19 @@ public void testMaxVirtualMemoryForTask() { } + + /** + * Ensure that by default JobContext.MAX_TASK_FAILURES_PER_TRACKER is less + * JobContext.MAP_MAX_ATTEMPTS and JobContext.REDUCE_MAX_ATTEMPTS so that + * failed tasks will be retried on other nodes + */ + @Test + public void testMaxTaskFailuresPerTracker() { + JobConf jobConf = new JobConf(true); + Assert.assertTrue("By default JobContext.MAX_TASK_FAILURES_PER_TRACKER was " + + "not less than JobContext.MAP_MAX_ATTEMPTS and REDUCE_MAX_ATTEMPTS" + ,jobConf.getMaxTaskFailuresPerTracker() < jobConf.getMaxMapAttempts() && + jobConf.getMaxTaskFailuresPerTracker() < jobConf.getMaxReduceAttempts() + ); + } } diff --git a/hadoop-mapreduce-project/src/java/mapred-default.xml b/hadoop-mapreduce-project/src/java/mapred-default.xml index 322e497ada..63a2ef5281 100644 --- a/hadoop-mapreduce-project/src/java/mapred-default.xml +++ b/hadoop-mapreduce-project/src/java/mapred-default.xml @@ -701,7 +701,7 @@ mapreduce.job.maxtaskfailures.per.tracker - 4 + 3 The number of task-failures on a tasktracker of a given job after which new tasks of that job aren't assigned to it. From 94c69f2c820b49066cdfadc7aef0d9d2d5a451cb Mon Sep 17 00:00:00 2001 From: Daryn Sharp Date: Fri, 31 Aug 2012 21:31:55 +0000 Subject: [PATCH 45/62] HDFS-3852. 
TestHftpDelegationToken is broken after HADOOP-8225 (daryn) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379623 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 1dcc6a2e65..d1692cbe12 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -1585,6 +1585,8 @@ Release 0.23.3 - UNRELEASED HDFS-3873. Hftp assumes security is disabled if token fetch fails (daryn) + HDFS-3852. TestHftpDelegationToken is broken after HADOOP-8225 (daryn) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java index cd8cf0d3f1..6be9af8cab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java @@ -53,7 +53,7 @@ public void testHdfsDelegationToken() throws Exception { new Text("127.0.0.1:8020")); user.addToken(token); Token token2 = new Token - (null, null, new Text("other token"), new Text("127.0.0.1:8020")); + (null, null, new Text("other token"), new Text("127.0.0.1:8021")); user.addToken(token2); assertEquals("wrong tokens in user", 2, user.getTokens().size()); FileSystem fs = From c76a7893f9215d25fca18f4444157db2addf720e Mon Sep 17 00:00:00 2001 From: Owen O'Malley Date: Fri, 31 Aug 2012 22:31:03 +0000 Subject: [PATCH 46/62] HDFS-3466. Get HTTP kerberos principal from the web authentication keytab. (omalley) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379646 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 8 ++++++-- .../hadoop/hdfs/server/namenode/NameNodeHttpServer.java | 7 +++++-- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index d1692cbe12..7e9ea2bf2e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -698,8 +698,12 @@ Branch-2 ( Unreleased changes ) HDFS-3837. Fix DataNode.recoverBlock findbugs warning. (eli) - HDFS-3733. Audit logs should include WebHDFS access. (Andy Isaacson via eli) + HDFS-3733. Audit logs should include WebHDFS access. (Andy Isaacson via + eli) + HDFS-3466. Get HTTP kerberos principal from the web authentication keytab. + (omalley) + BREAKDOWN OF HDFS-3042 SUBTASKS HDFS-2185. HDFS portion of ZK-based FailoverController (todd) @@ -997,7 +1001,7 @@ Release 2.0.0-alpha - 05-23-2012 (Brandon Li via szetszwo) HDFS-2617. Replaced Kerberized SSL for image transfer and fsck - with SPNEGO-based solution. (jghoman, tucu, and atm via eli) + with SPNEGO-based solution. (jghoman, omalley, tucu, and atm via eli) HDFS-3365. 
Enable users to disable socket caching in DFS client configuration (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java index 7090f455d8..5ba7b63d24 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java @@ -117,8 +117,11 @@ private Map getAuthFilterParams(Configuration conf) SecurityUtil.getServerPrincipal(principalInConf, bindAddress.getHostName())); } - String httpKeytab = conf - .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY); + String httpKeytab = conf.get( + DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY); + if (httpKeytab == null) { + httpKeytab = conf.get(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY); + } if (httpKeytab != null && !httpKeytab.isEmpty()) { params.put( DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, From 4911d9b1ffb68d556bd8087b2d9d8decceed0fe1 Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Fri, 31 Aug 2012 22:40:51 +0000 Subject: [PATCH 47/62] HADOOP-8736. Add Builder for building RPC server. Contributed by Brandon Li git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379652 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 + .../org/apache/hadoop/ha/ZKFCRpcServer.java | 9 +- .../main/java/org/apache/hadoop/ipc/RPC.java | 104 ++++++++++++++++++ .../apache/hadoop/ipc/MiniRPCBenchmark.java | 6 +- .../apache/hadoop/ipc/RPCCallBenchmark.java | 11 +- .../ipc/TestMultipleProtocolServer.java | 10 +- .../apache/hadoop/ipc/TestProtoBufRpc.java | 3 +- .../java/org/apache/hadoop/ipc/TestRPC.java | 88 +++++++++++---- .../hadoop/ipc/TestRPCCompatibility.java | 35 +++--- .../org/apache/hadoop/ipc/TestSaslRPC.java | 43 +++++--- .../security/TestDoAsEffectiveUser.java | 42 ++++--- 11 files changed, 270 insertions(+), 83 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 09a989c485..61c44f3eba 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -101,6 +101,8 @@ Trunk (unreleased changes) HADOOP-8619. WritableComparator must implement no-arg constructor. (Chris Douglas via Suresh) + HADOOP-8736. Add Builder for building RPC server. (Brandon Li via Suresh) + BUG FIXES HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName. 
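Relating to the HDFS-3466 keytab fallback shown above, a minimal sketch (not part of either patch) of the lookup order it introduces. The key constants are the real DFSConfigKeys names used in that change; the keytab path is an invented example.

    // fragment; assumes org.apache.hadoop.conf.Configuration and
    // org.apache.hadoop.hdfs.DFSConfigKeys are available
    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,
        "/etc/security/keytabs/nn.service.keytab");   // example path only

    String httpKeytab =
        conf.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
    if (httpKeytab == null) {
      // no dedicated web-auth keytab configured: reuse the NameNode keytab
      httpKeytab = conf.get(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY);
    }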
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java index 2077a86a5d..7ea5188ad8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java @@ -55,11 +55,10 @@ public class ZKFCRpcServer implements ZKFCProtocol { new ZKFCProtocolServerSideTranslatorPB(this); BlockingService service = ZKFCProtocolService .newReflectiveBlockingService(translator); - this.server = RPC.getServer( - ZKFCProtocolPB.class, - service, bindAddr.getHostName(), - bindAddr.getPort(), HANDLER_COUNT, false, conf, - null /*secretManager*/); + this.server = new RPC.Builder(conf).setProtocol(ZKFCProtocolPB.class) + .setInstance(service).setBindAddress(bindAddr.getHostName()) + .setPort(bindAddr.getPort()).setNumHandlers(HANDLER_COUNT) + .setVerbose(false).build(); // set service-level authorization security policy if (conf.getBoolean( diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java index f4c878e29d..550b04719d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java @@ -713,6 +713,110 @@ Server getServer(Class protocol, null); } + /** + * Class to construct instances of RPC server with specific options. + */ + public static class Builder { + private Class protocol = null; + private Object instance = null; + private String bindAddress = "0.0.0.0"; + private int port = 0; + private int numHandlers = 1; + private int numReaders = -1; + private int queueSizePerHandler = -1; + private boolean verbose = false; + private final Configuration conf; + private SecretManager secretManager = null; + private String portRangeConfig = null; + + public Builder(Configuration conf) { + this.conf = conf; + } + + /** Mandatory field */ + public Builder setProtocol(Class protocol) { + this.protocol = protocol; + return this; + } + + /** Mandatory field */ + public Builder setInstance(Object instance) { + this.instance = instance; + return this; + } + + /** Default: 0.0.0.0 */ + public Builder setBindAddress(String bindAddress) { + this.bindAddress = bindAddress; + return this; + } + + /** Default: 0 */ + public Builder setPort(int port) { + this.port = port; + return this; + } + + /** Default: 1 */ + public Builder setNumHandlers(int numHandlers) { + this.numHandlers = numHandlers; + return this; + } + + /** Default: -1 */ + public Builder setnumReaders(int numReaders) { + this.numReaders = numReaders; + return this; + } + + /** Default: -1 */ + public Builder setQueueSizePerHandler(int queueSizePerHandler) { + this.queueSizePerHandler = queueSizePerHandler; + return this; + } + + /** Default: false */ + public Builder setVerbose(boolean verbose) { + this.verbose = verbose; + return this; + } + + /** Default: null */ + public Builder setSecretManager( + SecretManager secretManager) { + this.secretManager = secretManager; + return this; + } + + /** Default: null */ + public Builder setPortRangeConfig(String portRangeConfig) { + this.portRangeConfig = portRangeConfig; + return this; + } + + /** + * Build the RPC Server. 
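    // Illustrative usage sketch, not part of this patch, of the Builder defined
    // above. MyProtocol and MyProtocolImpl are hypothetical stand-ins for a real
    // protocol interface and its implementation.
    Configuration conf = new Configuration();
    RPC.Server server = new RPC.Builder(conf)
        .setProtocol(MyProtocol.class)      // mandatory
        .setInstance(new MyProtocolImpl())  // mandatory
        .setBindAddress("0.0.0.0")          // optional; shown with its default
        .setPort(0)                         // 0 lets the server pick a free port
        .setNumHandlers(5)
        .setVerbose(false)
        .build();   // throws HadoopIllegalArgumentException if conf, protocol,
                    // or instance was never supplied
    server.start();
    // One builder call replaces the long positional RPC.getServer(...) overloads
    // that the callers below are being migrated away from.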
+ * @throws IOException on error + * @throws HadoopIllegalArgumentException when mandatory fields are not set + */ + public Server build() throws IOException, HadoopIllegalArgumentException { + if (this.conf == null) { + throw new HadoopIllegalArgumentException("conf is not set"); + } + if (this.protocol == null) { + throw new HadoopIllegalArgumentException("protocol is not set"); + } + if (this.instance == null) { + throw new HadoopIllegalArgumentException("instance is not set"); + } + + return getProtocolEngine(this.protocol, this.conf).getServer( + this.protocol, this.instance, this.bindAddress, this.port, + this.numHandlers, this.numReaders, this.queueSizePerHandler, + this.verbose, this.conf, this.secretManager, this.portRangeConfig); + } + } + /** An RPC Server. */ public abstract static class Server extends org.apache.hadoop.ipc.Server { boolean verbose; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java index a82419d5dd..5130bad1a6 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java @@ -165,8 +165,10 @@ public Token getDelegationToken(Text renewer) new TestDelegationTokenSecretManager(24*60*60*1000, 7*24*60*60*1000,24*60*60*1000,3600000); secretManager.startThreads(); - rpcServer = RPC.getServer(MiniProtocol.class, - this, DEFAULT_SERVER_ADDRESS, 0, 1, false, conf, secretManager); + rpcServer = new RPC.Builder(conf).setProtocol(MiniProtocol.class) + .setInstance(this).setBindAddress(DEFAULT_SERVER_ADDRESS).setPort(0) + .setNumHandlers(1).setVerbose(false).setSecretManager(secretManager) + .build(); rpcServer.start(); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java index 5fe2302139..21aa44a7c9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java @@ -227,11 +227,14 @@ private Server startServer(MyOptions opts) throws IOException { BlockingService service = TestProtobufRpcProto .newReflectiveBlockingService(serverImpl); - server = RPC.getServer(TestRpcService.class, service, - opts.host, opts.port, opts.serverThreads, false, conf, null); + server = new RPC.Builder(conf).setProtocol(TestRpcService.class) + .setInstance(service).setBindAddress(opts.host).setPort(opts.port) + .setNumHandlers(opts.serverThreads).setVerbose(false).build(); } else if (opts.rpcEngine == WritableRpcEngine.class) { - server = RPC.getServer(TestProtocol.class, new TestRPC.TestImpl(), - opts.host, opts.port, opts.serverThreads, false, conf, null); + server = new RPC.Builder(conf).setProtocol(TestProtocol.class) + .setInstance(new TestRPC.TestImpl()).setBindAddress(opts.host) + .setPort(opts.port).setNumHandlers(opts.serverThreads) + .setVerbose(false).build(); } else { throw new RuntimeException("Bad engine: " + opts.rpcEngine); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java index 0446b42508..dd73b52cc8 100644 --- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java @@ -175,8 +175,9 @@ public void hello() { @Before public void setUp() throws Exception { // create a server with two handlers - server = RPC.getServer(Foo0.class, - new Foo0Impl(), ADDRESS, 0, 2, false, conf, null); + server = new RPC.Builder(conf).setProtocol(Foo0.class) + .setInstance(new Foo0Impl()).setBindAddress(ADDRESS).setPort(0) + .setNumHandlers(2).setVerbose(false).build(); server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Foo1.class, new Foo1Impl()); server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Bar.class, new BarImpl()); server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Mixin.class, new BarImpl()); @@ -263,8 +264,9 @@ public void testNonExistingProtocol2() throws IOException { @Test(expected=IOException.class) public void testIncorrectServerCreation() throws IOException { - RPC.getServer(Foo1.class, - new Foo0Impl(), ADDRESS, 0, 2, false, conf, null); + new RPC.Builder(conf).setProtocol(Foo1.class).setInstance(new Foo0Impl()) + .setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false) + .build(); } // Now test a PB service - a server hosts both PB and Writable Rpcs. diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java index 9e7b269441..54e227a26b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java @@ -113,7 +113,8 @@ public void setUp() throws IOException { // Setup server for both protocols .newReflectiveBlockingService(serverImpl); // Get RPC server for server side implementation - server = RPC.getServer(TestRpcService.class, service, ADDRESS, PORT, conf); + server = new RPC.Builder(conf).setProtocol(TestRpcService.class) + .setInstance(service).setBindAddress(ADDRESS).setPort(PORT).build(); addr = NetUtils.getConnectAddress(server); // now the second protocol diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java index bf9fbc26d8..732431d2a1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java @@ -314,8 +314,9 @@ public int getCloseCalled() { @Test public void testConfRpc() throws Exception { - Server server = RPC.getServer(TestProtocol.class, - new TestImpl(), ADDRESS, 0, 1, false, conf, null); + Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class) + .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0) + .setNumHandlers(1).setVerbose(false).build(); // Just one handler int confQ = conf.getInt( CommonConfigurationKeys.IPC_SERVER_HANDLER_QUEUE_SIZE_KEY, @@ -328,8 +329,11 @@ public void testConfRpc() throws Exception { assertEquals(confReaders, server.getNumReaders()); server.stop(); - server = RPC.getServer(TestProtocol.class, - new TestImpl(), ADDRESS, 0, 1, 3, 200, false, conf, null); + server = new RPC.Builder(conf).setProtocol(TestProtocol.class) + .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0) + 
.setNumHandlers(1).setnumReaders(3).setQueueSizePerHandler(200) + .setVerbose(false).build(); + assertEquals(3, server.getNumReaders()); assertEquals(200, server.getMaxQueueSize()); server.stop(); @@ -337,8 +341,8 @@ public void testConfRpc() throws Exception { @Test public void testProxyAddress() throws Exception { - Server server = RPC.getServer(TestProtocol.class, - new TestImpl(), ADDRESS, 0, conf); + Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class) + .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).build(); TestProtocol proxy = null; try { @@ -362,8 +366,10 @@ public void testProxyAddress() throws Exception { public void testSlowRpc() throws Exception { System.out.println("Testing Slow RPC"); // create a server with two handlers - Server server = RPC.getServer(TestProtocol.class, - new TestImpl(), ADDRESS, 0, 2, false, conf, null); + Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class) + .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0) + .setNumHandlers(2).setVerbose(false).build(); + TestProtocol proxy = null; try { @@ -409,8 +415,8 @@ public void testCalls() throws Exception { } private void testCallsInternal(Configuration conf) throws Exception { - Server server = RPC.getServer(TestProtocol.class, - new TestImpl(), ADDRESS, 0, conf); + Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class) + .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).build(); TestProtocol proxy = null; try { server.start(); @@ -528,8 +534,9 @@ public Service[] getServices() { } private void doRPCs(Configuration conf, boolean expectFailure) throws Exception { - Server server = RPC.getServer(TestProtocol.class, - new TestImpl(), ADDRESS, 0, 5, true, conf, null); + Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class) + .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0) + .setNumHandlers(5).setVerbose(true).build(); server.refreshServiceAcl(conf, new TestPolicyProvider()); @@ -573,8 +580,9 @@ private void doRPCs(Configuration conf, boolean expectFailure) throws Exception @Test public void testServerAddress() throws IOException { - Server server = RPC.getServer(TestProtocol.class, - new TestImpl(), ADDRESS, 0, 5, true, conf, null); + Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class) + .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0) + .setNumHandlers(5).setVerbose(true).build(); InetSocketAddress bindAddr = null; try { bindAddr = NetUtils.getConnectAddress(server); @@ -668,8 +676,9 @@ public void testWrappedStopProxy() throws IOException { @Test public void testErrorMsgForInsecureClient() throws Exception { - final Server server = RPC.getServer(TestProtocol.class, - new TestImpl(), ADDRESS, 0, 5, true, conf, null); + final Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class) + .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0) + .setNumHandlers(5).setVerbose(true).build(); server.enableSecurity(); server.start(); boolean succeeded = false; @@ -693,8 +702,10 @@ public void testErrorMsgForInsecureClient() throws Exception { conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY, 2); - final Server multiServer = RPC.getServer(TestProtocol.class, - new TestImpl(), ADDRESS, 0, 5, true, conf, null); + final Server multiServer = new RPC.Builder(conf) + .setProtocol(TestProtocol.class).setInstance(new TestImpl()) + .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true) + .build(); 
multiServer.enableSecurity(); multiServer.start(); succeeded = false; @@ -748,8 +759,9 @@ public void testStopsAllThreads() throws Exception { assertEquals("Expect no Reader threads running before test", 0, threadsBefore); - final Server server = RPC.getServer(TestProtocol.class, - new TestImpl(), ADDRESS, 0, 5, true, conf, null); + final Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class) + .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0) + .setNumHandlers(5).setVerbose(true).build(); server.start(); try { int threadsRunning = countThreads("Server$Listener$Reader"); @@ -762,6 +774,42 @@ public void testStopsAllThreads() throws Exception { 0, threadsAfter); } + @Test + public void testRPCBuilder() throws Exception { + // Test mandatory field conf + try { + new RPC.Builder(null).setProtocol(TestProtocol.class) + .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0) + .setNumHandlers(5).setVerbose(true).build(); + fail("Didn't throw HadoopIllegalArgumentException"); + } catch (Exception e) { + if (!(e instanceof HadoopIllegalArgumentException)) { + fail("Expecting HadoopIllegalArgumentException but caught " + e); + } + } + // Test mandatory field protocol + try { + new RPC.Builder(conf).setInstance(new TestImpl()).setBindAddress(ADDRESS) + .setPort(0).setNumHandlers(5).setVerbose(true).build(); + fail("Didn't throw HadoopIllegalArgumentException"); + } catch (Exception e) { + if (!(e instanceof HadoopIllegalArgumentException)) { + fail("Expecting HadoopIllegalArgumentException but caught " + e); + } + } + // Test mandatory field instance + try { + new RPC.Builder(conf).setProtocol(TestProtocol.class) + .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5) + .setVerbose(true).build(); + fail("Didn't throw HadoopIllegalArgumentException"); + } catch (Exception e) { + if (!(e instanceof HadoopIllegalArgumentException)) { + fail("Expecting HadoopIllegalArgumentException but caught " + e); + } + } + } + public static void main(String[] args) throws Exception { new TestRPC().testCallsInternal(conf); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java index e2b7707cd9..608119d184 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java @@ -131,8 +131,9 @@ public void tearDown() throws IOException { public void testVersion0ClientVersion1Server() throws Exception { // create a server with two handlers TestImpl1 impl = new TestImpl1(); - server = RPC.getServer(TestProtocol1.class, - impl, ADDRESS, 0, 2, false, conf, null); + server = new RPC.Builder(conf).setProtocol(TestProtocol1.class) + .setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2) + .setVerbose(false).build(); server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl); server.start(); addr = NetUtils.getConnectAddress(server); @@ -147,8 +148,9 @@ public void testVersion0ClientVersion1Server() throws Exception { @Test // old client vs new server public void testVersion1ClientVersion0Server() throws Exception { // create a server with two handlers - server = RPC.getServer(TestProtocol0.class, - new TestImpl0(), ADDRESS, 0, 2, false, conf, null); + server = new RPC.Builder(conf).setProtocol(TestProtocol0.class) + .setInstance(new 
TestImpl0()).setBindAddress(ADDRESS).setPort(0) + .setNumHandlers(2).setVerbose(false).build(); server.start(); addr = NetUtils.getConnectAddress(server); @@ -198,8 +200,9 @@ public void ping() throws IOException { public void testVersion2ClientVersion1Server() throws Exception { // create a server with two handlers TestImpl1 impl = new TestImpl1(); - server = RPC.getServer(TestProtocol1.class, - impl, ADDRESS, 0, 2, false, conf, null); + server = new RPC.Builder(conf).setProtocol(TestProtocol1.class) + .setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2) + .setVerbose(false).build(); server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl); server.start(); addr = NetUtils.getConnectAddress(server); @@ -219,8 +222,9 @@ public void testVersion2ClientVersion2Server() throws Exception { ProtocolSignature.resetCache(); // create a server with two handlers TestImpl2 impl = new TestImpl2(); - server = RPC.getServer(TestProtocol2.class, - impl, ADDRESS, 0, 2, false, conf, null); + server = new RPC.Builder(conf).setProtocol(TestProtocol2.class) + .setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2) + .setVerbose(false).build(); server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl); server.start(); addr = NetUtils.getConnectAddress(server); @@ -290,8 +294,9 @@ public interface TestProtocol4 extends TestProtocol2 { @Test public void testVersionMismatch() throws IOException { - server = RPC.getServer(TestProtocol2.class, new TestImpl2(), ADDRESS, 0, 2, - false, conf, null); + server = new RPC.Builder(conf).setProtocol(TestProtocol2.class) + .setInstance(new TestImpl2()).setBindAddress(ADDRESS).setPort(0) + .setNumHandlers(2).setVerbose(false).build(); server.start(); addr = NetUtils.getConnectAddress(server); @@ -308,8 +313,9 @@ public void testVersionMismatch() throws IOException { @Test public void testIsMethodSupported() throws IOException { - server = RPC.getServer(TestProtocol2.class, new TestImpl2(), ADDRESS, 0, 2, - false, conf, null); + server = new RPC.Builder(conf).setProtocol(TestProtocol2.class) + .setInstance(new TestImpl2()).setBindAddress(ADDRESS).setPort(0) + .setNumHandlers(2).setVerbose(false).build(); server.start(); addr = NetUtils.getConnectAddress(server); @@ -332,8 +338,9 @@ public void testIsMethodSupported() throws IOException { @Test public void testProtocolMetaInfoSSTranslatorPB() throws Exception { TestImpl1 impl = new TestImpl1(); - server = RPC.getServer(TestProtocol1.class, impl, ADDRESS, 0, 2, false, - conf, null); + server = new RPC.Builder(conf).setProtocol(TestProtocol1.class) + .setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2) + .setVerbose(false).build(); server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl); server.start(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java index 014875440e..db4a146542 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java @@ -235,9 +235,11 @@ public Class annotationType() { @Test public void testDigestRpc() throws Exception { TestTokenSecretManager sm = new TestTokenSecretManager(); - final Server server = RPC.getServer(TestSaslProtocol.class, - new TestSaslImpl(), ADDRESS, 0, 5, true, conf, sm); - + final Server server = new 
RPC.Builder(conf) + .setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl()) + .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true) + .setSecretManager(sm).build(); + doDigestRpc(server, sm); } @@ -246,9 +248,10 @@ public void testDigestRpcWithoutAnnotation() throws Exception { TestTokenSecretManager sm = new TestTokenSecretManager(); try { SecurityUtil.setSecurityInfoProviders(new CustomSecurityInfo()); - final Server server = RPC.getServer(TestSaslProtocol.class, - new TestSaslImpl(), ADDRESS, 0, 5, - true, conf, sm); + final Server server = new RPC.Builder(conf) + .setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl()) + .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5) + .setVerbose(true).setSecretManager(sm).build(); doDigestRpc(server, sm); } finally { SecurityUtil.setSecurityInfoProviders(new SecurityInfo[0]); @@ -257,8 +260,9 @@ public void testDigestRpcWithoutAnnotation() throws Exception { @Test public void testSecureToInsecureRpc() throws Exception { - Server server = RPC.getServer(TestSaslProtocol.class, - new TestSaslImpl(), ADDRESS, 0, 5, true, conf, null); + Server server = new RPC.Builder(conf).setProtocol(TestSaslProtocol.class) + .setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0) + .setNumHandlers(5).setVerbose(true).build(); server.disableSecurity(); TestTokenSecretManager sm = new TestTokenSecretManager(); doDigestRpc(server, sm); @@ -267,8 +271,10 @@ public void testSecureToInsecureRpc() throws Exception { @Test public void testErrorMessage() throws Exception { BadTokenSecretManager sm = new BadTokenSecretManager(); - final Server server = RPC.getServer(TestSaslProtocol.class, - new TestSaslImpl(), ADDRESS, 0, 5, true, conf, sm); + final Server server = new RPC.Builder(conf) + .setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl()) + .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true) + .setSecretManager(sm).build(); boolean succeeded = false; try { @@ -355,8 +361,10 @@ public void testGetRemotePrincipal() throws Exception { @Test public void testPerConnectionConf() throws Exception { TestTokenSecretManager sm = new TestTokenSecretManager(); - final Server server = RPC.getServer(TestSaslProtocol.class, - new TestSaslImpl(), ADDRESS, 0, 5, true, conf, sm); + final Server server = new RPC.Builder(conf) + .setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl()) + .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true) + .setSecretManager(sm).build(); server.start(); final UserGroupInformation current = UserGroupInformation.getCurrentUser(); final InetSocketAddress addr = NetUtils.getConnectAddress(server); @@ -418,8 +426,10 @@ static void testKerberosRpc(String principal, String keytab) throws Exception { UserGroupInformation current = UserGroupInformation.getCurrentUser(); System.out.println("UGI: " + current); - Server server = RPC.getServer(TestSaslProtocol.class, new TestSaslImpl(), - ADDRESS, 0, 5, true, newConf, null); + Server server = new RPC.Builder(newConf) + .setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl()) + .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true) + .build(); TestSaslProtocol proxy = null; server.start(); @@ -441,8 +451,9 @@ static void testKerberosRpc(String principal, String keytab) throws Exception { @Test public void testDigestAuthMethod() throws Exception { TestTokenSecretManager sm = new TestTokenSecretManager(); - Server server = RPC.getServer(TestSaslProtocol.class, - new 
TestSaslImpl(), ADDRESS, 0, 5, true, conf, sm); + Server server = new RPC.Builder(conf).setProtocol(TestSaslProtocol.class) + .setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0) + .setNumHandlers(5).setVerbose(true).setSecretManager(sm).build(); server.start(); final UserGroupInformation current = UserGroupInformation.getCurrentUser(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java index de35cd2460..529124eddf 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java @@ -155,8 +155,9 @@ public void testRealUserSetup() throws IOException { conf.setStrings(ProxyUsers .getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1"); configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME); - Server server = RPC.getServer(TestProtocol.class, new TestImpl(), ADDRESS, - 0, 5, true, conf, null); + Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class) + .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0) + .setNumHandlers(5).setVerbose(true).build(); refreshConf(conf); try { @@ -197,8 +198,9 @@ public void testRealUserAuthorizationSuccess() throws IOException { configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME); conf.setStrings(ProxyUsers.getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1"); - Server server = RPC.getServer(TestProtocol.class, new TestImpl(), ADDRESS, - 0, 2, false, conf, null); + Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class) + .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0) + .setNumHandlers(2).setVerbose(false).build(); refreshConf(conf); try { @@ -244,8 +246,9 @@ public void testRealUserIPAuthorizationFailure() throws IOException { "20.20.20.20"); //Authorized IP address conf.setStrings(ProxyUsers.getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1"); - Server server = RPC.getServer(TestProtocol.class, new TestImpl(), ADDRESS, - 0, 2, false, conf, null); + Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class) + .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0) + .setNumHandlers(2).setVerbose(false).build(); refreshConf(conf); @@ -286,8 +289,9 @@ public void testRealUserIPNotSpecified() throws IOException { final Configuration conf = new Configuration(); conf.setStrings(ProxyUsers .getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1"); - Server server = RPC.getServer(TestProtocol.class, new TestImpl(), ADDRESS, - 0, 2, false, conf, null); + Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class) + .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0) + .setNumHandlers(2).setVerbose(false).build(); try { server.start(); @@ -325,8 +329,9 @@ public String run() throws IOException { public void testRealUserGroupNotSpecified() throws IOException { final Configuration conf = new Configuration(); configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME); - Server server = RPC.getServer(TestProtocol.class, new TestImpl(), ADDRESS, - 0, 2, false, conf, null); + Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class) + .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0) + .setNumHandlers(2).setVerbose(false).build(); try { server.start(); @@ -366,9 +371,9 
@@ public void testRealUserGroupAuthorizationFailure() throws IOException { configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME); conf.setStrings(ProxyUsers.getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group3"); - Server server = RPC.getServer(TestProtocol.class, new TestImpl(), ADDRESS, - 0, 2, false, conf, null); - + Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class) + .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0) + .setNumHandlers(2).setVerbose(false).build(); try { server.start(); @@ -414,8 +419,9 @@ public void testProxyWithToken() throws Exception { conf .set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); UserGroupInformation.setConfiguration(conf); - final Server server = RPC.getServer(TestProtocol.class, new TestImpl(), - ADDRESS, 0, 5, true, conf, sm); + final Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class) + .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0) + .setNumHandlers(5).setVerbose(true).setSecretManager(sm).build(); server.start(); @@ -468,8 +474,10 @@ public void testTokenBySuperUser() throws Exception { newConf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); UserGroupInformation.setConfiguration(newConf); - final Server server = RPC.getServer(TestProtocol.class, new TestImpl(), - ADDRESS, 0, 5, true, newConf, sm); + final Server server = new RPC.Builder(newConf) + .setProtocol(TestProtocol.class).setInstance(new TestImpl()) + .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true) + .setSecretManager(sm).build(); server.start(); From 50222ff52903431ec7aefa30fdf4fdaa04915c92 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Sat, 1 Sep 2012 11:41:15 +0000 Subject: [PATCH 48/62] HDFS-3871. Change NameNodeProxies to use RetryUtils. Contributed by Arun C Murthy git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379743 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../apache/hadoop/hdfs/NameNodeProxies.java | 106 ++---------------- .../hadoop/hdfs/web/WebHdfsFileSystem.java | 11 +- 3 files changed, 22 insertions(+), 98 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 7e9ea2bf2e..18c354c828 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -434,6 +434,9 @@ Branch-2 ( Unreleased changes ) HDFS-3177. Update DFSClient and DataXceiver to handle different checkum types in file checksum computation. (Kihwal Lee via szetszwo) + HDFS-3871. Change NameNodeProxies to use RetryUtils. (Arun C Murthy + via szetszwo) + OPTIMIZATIONS HDFS-2982. 
Startup performance suffers when there are many edit log diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java index 12ec985fb2..5ae2ee424f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java @@ -57,6 +57,7 @@ import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.io.retry.RetryProxy; +import org.apache.hadoop.io.retry.RetryUtils; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RemoteException; @@ -68,7 +69,6 @@ import org.apache.hadoop.tools.GetUserMappingsProtocol; import com.google.common.base.Preconditions; -import com.google.protobuf.ServiceException; /** * Create proxy objects to communicate with a remote NN. All remote access to an @@ -243,106 +243,20 @@ private static NamenodeProtocol createNNProxyWithNamenodeProtocol( return new NamenodeProtocolTranslatorPB(proxy); } - /** - * Return the default retry policy used in RPC. - * - * If dfs.client.retry.policy.enabled == false, use TRY_ONCE_THEN_FAIL. - * - * Otherwise, first unwrap ServiceException if possible, and then - * (1) use multipleLinearRandomRetry for - * - SafeModeException, or - * - IOException other than RemoteException, or - * - ServiceException; and - * (2) use TRY_ONCE_THEN_FAIL for - * - non-SafeMode RemoteException, or - * - non-IOException. - * - * Note that dfs.client.retry.max < 0 is not allowed. - */ - public static RetryPolicy getDefaultRetryPolicy(Configuration conf) { - final RetryPolicy multipleLinearRandomRetry = getMultipleLinearRandomRetry(conf); - if (LOG.isDebugEnabled()) { - LOG.debug("multipleLinearRandomRetry = " + multipleLinearRandomRetry); - } - if (multipleLinearRandomRetry == null) { - //no retry - return RetryPolicies.TRY_ONCE_THEN_FAIL; - } else { - return new RetryPolicy() { - @Override - public RetryAction shouldRetry(Exception e, int retries, int failovers, - boolean isMethodIdempotent) throws Exception { - if (e instanceof ServiceException) { - //unwrap ServiceException - final Throwable cause = e.getCause(); - if (cause != null && cause instanceof Exception) { - e = (Exception)cause; - } - } - - //see (1) and (2) in the javadoc of this method. - final RetryPolicy p; - if (e instanceof RemoteException) { - final RemoteException re = (RemoteException)e; - p = SafeModeException.class.getName().equals(re.getClassName())? - multipleLinearRandomRetry: RetryPolicies.TRY_ONCE_THEN_FAIL; - } else if (e instanceof IOException || e instanceof ServiceException) { - p = multipleLinearRandomRetry; - } else { //non-IOException - p = RetryPolicies.TRY_ONCE_THEN_FAIL; - } - - if (LOG.isDebugEnabled()) { - LOG.debug("RETRY " + retries + ") policy=" - + p.getClass().getSimpleName() + ", exception=" + e); - } - LOG.info("RETRY " + retries + ") policy=" - + p.getClass().getSimpleName() + ", exception=" + e); - return p.shouldRetry(e, retries, failovers, isMethodIdempotent); - } - - @Override - public String toString() { - return "RetryPolicy[" + multipleLinearRandomRetry + ", " - + RetryPolicies.TRY_ONCE_THEN_FAIL.getClass().getSimpleName() - + "]"; - } - }; - } - } - - /** - * Return the MultipleLinearRandomRetry policy specified in the conf, - * or null if the feature is disabled. 
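    // Rough sketch, not part of the patch, of exercising the conf-driven retry
    // behaviour described in the javadoc above through the RetryUtils helper
    // this change switches to. The spec string is invented for illustration and
    // follows the documented "s1,n1,s2,n2,..." form: sleep 10s between each of
    // 6 retries, then 60s between each of 10 more.
    Configuration conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, true);
    conf.set(DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY, "10000,6,60000,10");
    RetryPolicy policy = RetryUtils.getDefaultRetryPolicy(conf,
        DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY,
        DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
        DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
        DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
        SafeModeException.class);
    // 'policy' is what the patch now hands to the NameNode proxy and to
    // WebHdfsFileSystem instead of the private helpers removed here.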
- * If the policy is specified in the conf but the policy cannot be parsed, - * the default policy is returned. - * - * Conf property: N pairs of sleep-time and number-of-retries - * dfs.client.retry.policy = "s1,n1,s2,n2,..." - */ - private static RetryPolicy getMultipleLinearRandomRetry(Configuration conf) { - final boolean enabled = conf.getBoolean( - DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, - DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT); - if (!enabled) { - return null; - } - - final String policy = conf.get( - DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY, - DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT); - - final RetryPolicy r = RetryPolicies.MultipleLinearRandomRetry.parseCommaSeparatedString(policy); - return r != null? r: RetryPolicies.MultipleLinearRandomRetry.parseCommaSeparatedString( - DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT); - } - private static ClientProtocol createNNProxyWithClientProtocol( InetSocketAddress address, Configuration conf, UserGroupInformation ugi, boolean withRetries) throws IOException { RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine.class); - final RetryPolicy defaultPolicy = getDefaultRetryPolicy(conf); + final RetryPolicy defaultPolicy = + RetryUtils.getDefaultRetryPolicy( + conf, + DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, + DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT, + DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY, + DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT, + SafeModeException.class); + final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class); ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy( ClientNamenodeProtocolPB.class, version, address, ugi, conf, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index fa2f7b9317..1dc6af3733 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -54,7 +54,6 @@ import org.apache.hadoop.hdfs.ByteRangeInputStream; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.NameNodeProxies; import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; @@ -89,6 +88,7 @@ import org.apache.hadoop.hdfs.web.resources.UserParam; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.retry.RetryPolicy; +import org.apache.hadoop.io.retry.RetryUtils; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.AccessControlException; @@ -181,7 +181,14 @@ public synchronized void initialize(URI uri, Configuration conf throw new IllegalArgumentException(e); } this.nnAddr = NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort()); - this.retryPolicy = NameNodeProxies.getDefaultRetryPolicy(conf); + this.retryPolicy = + RetryUtils.getDefaultRetryPolicy( + conf, + DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, + DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT, + DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY, + DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT, + SafeModeException.class); this.workingDir = getHomeDirectory(); if 
(UserGroupInformation.isSecurityEnabled()) { From 54e612bfb9f877e58f7f153c43cb4147876826d3 Mon Sep 17 00:00:00 2001 From: Harsh J Date: Sat, 1 Sep 2012 19:22:11 +0000 Subject: [PATCH 49/62] HDFS-2580. NameNode#main(...) can make use of GenericOptionsParser. Contributed by harsh. (harsh) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379828 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../org/apache/hadoop/hdfs/server/namenode/NameNode.java | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 18c354c828..8d875eba3d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -131,6 +131,8 @@ Trunk (unreleased changes) HDFS-3851. DFSOutputStream class code cleanup. (Jing Zhao via suresh) + HDFS-2580. NameNode#main(...) can make use of GenericOptionsParser. (harsh) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index c1bca3b45b..6ff3e1def2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -73,6 +73,7 @@ import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol; import org.apache.hadoop.tools.GetUserMappingsProtocol; import org.apache.hadoop.util.ExitUtil.ExitException; +import org.apache.hadoop.util.GenericOptionsParser; import org.apache.hadoop.util.ServicePlugin; import org.apache.hadoop.util.StringUtils; @@ -1056,6 +1057,10 @@ public static NameNode createNameNode(String argv[], Configuration conf) throws IOException { if (conf == null) conf = new HdfsConfiguration(); + // Parse out some generic args into Configuration. + GenericOptionsParser hParser = new GenericOptionsParser(conf, argv); + argv = hParser.getRemainingArgs(); + // Parse the rest, NN specific args. StartupOption startOpt = parseArguments(argv); if (startOpt == null) { printUsage(System.err); From da3bd6713871a084430eb0360ece5344c034711e Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Sun, 2 Sep 2012 06:20:41 +0000 Subject: [PATCH 50/62] HDFS-3880. Use Builder to build RPC server in HDFS. Contributed by Brandon Li. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379917 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hadoop/hdfs/server/datanode/DataNode.java | 13 ++++++--- .../server/journalservice/JournalService.java | 5 ++-- .../server/namenode/NameNodeRpcServer.java | 27 +++++++++++-------- ...TestClientProtocolWithDelegationToken.java | 8 +++--- .../security/token/block/TestBlockToken.java | 5 ++-- 6 files changed, 39 insertions(+), 22 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 8d875eba3d..a0255edf34 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -133,6 +133,9 @@ Trunk (unreleased changes) HDFS-2580. NameNode#main(...) can make use of GenericOptionsParser. (harsh) + HDFS-3880. Use Builder to build RPC server in HDFS. 
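Looking back at the HDFS-2580 change above, a hypothetical, self-contained illustration of the generic-option handling that NameNode#main now performs first. The class name and printed text are invented; the parser calls are the ones the patch adds.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.GenericOptionsParser;

    public class GenericArgsSketch {
      public static void main(String[] argv) throws Exception {
        Configuration conf = new Configuration();
        // Generic options such as -D, -conf and -fs are absorbed into conf here;
        // only what remains would be parsed as NameNode-specific startup options.
        GenericOptionsParser parser = new GenericOptionsParser(conf, argv);
        String[] remaining = parser.getRemainingArgs();
        System.out.println("remaining args: " + java.util.Arrays.toString(remaining));
      }
    }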
+ (Brandon Li vias suresh) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 55d4571ad7..e4c36134e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -417,10 +417,15 @@ private void initIpcServer(Configuration conf) throws IOException { new ClientDatanodeProtocolServerSideTranslatorPB(this); BlockingService service = ClientDatanodeProtocolService .newReflectiveBlockingService(clientDatanodeProtocolXlator); - ipcServer = RPC.getServer(ClientDatanodeProtocolPB.class, service, ipcAddr - .getHostName(), ipcAddr.getPort(), conf.getInt( - DFS_DATANODE_HANDLER_COUNT_KEY, DFS_DATANODE_HANDLER_COUNT_DEFAULT), - false, conf, blockPoolTokenSecretManager); + ipcServer = new RPC.Builder(conf) + .setProtocol(ClientDatanodeProtocolPB.class) + .setInstance(service) + .setBindAddress(ipcAddr.getHostName()) + .setPort(ipcAddr.getPort()) + .setNumHandlers( + conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY, + DFS_DATANODE_HANDLER_COUNT_DEFAULT)).setVerbose(false) + .setSecretManager(blockPoolTokenSecretManager).build(); InterDatanodeProtocolServerSideTranslatorPB interDatanodeProtocolXlator = new InterDatanodeProtocolServerSideTranslatorPB(this); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/journalservice/JournalService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/journalservice/JournalService.java index e8d7073670..4764259506 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/journalservice/JournalService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/journalservice/JournalService.java @@ -283,8 +283,9 @@ private static RPC.Server createRpcServer(Configuration conf, new JournalProtocolServerSideTranslatorPB(impl); BlockingService service = JournalProtocolService.newReflectiveBlockingService(xlator); - return RPC.getServer(JournalProtocolPB.class, service, - address.getHostName(), address.getPort(), 1, false, conf, null); + return new RPC.Builder(conf).setProtocol(JournalProtocolPB.class) + .setInstance(service).setBindAddress(address.getHostName()) + .setPort(address.getPort()).setNumHandlers(1).setVerbose(false).build(); } private void verifyEpoch(long e) throws FencedException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 1c80c8a794..2f4037bea9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -206,12 +206,15 @@ public NameNodeRpcServer(Configuration conf, NameNode nn) conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY, DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT); // Add all the RPC protocols that the namenode implements - this.serviceRpcServer = - RPC.getServer(org.apache.hadoop.hdfs.protocolPB. 
- ClientNamenodeProtocolPB.class, clientNNPbService, - dnSocketAddr.getHostName(), dnSocketAddr.getPort(), - serviceHandlerCount, - false, conf, namesystem.getDelegationTokenSecretManager()); + this.serviceRpcServer = new RPC.Builder(conf) + .setProtocol( + org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class) + .setInstance(clientNNPbService) + .setBindAddress(dnSocketAddr.getHostName()) + .setPort(dnSocketAddr.getPort()).setNumHandlers(serviceHandlerCount) + .setVerbose(false) + .setSecretManager(namesystem.getDelegationTokenSecretManager()) + .build(); DFSUtil.addPBProtocol(conf, HAServiceProtocolPB.class, haPbService, serviceRpcServer); DFSUtil.addPBProtocol(conf, NamenodeProtocolPB.class, NNPbService, @@ -232,11 +235,13 @@ public NameNodeRpcServer(Configuration conf, NameNode nn) serviceRPCAddress = null; } // Add all the RPC protocols that the namenode implements - this.clientRpcServer = RPC.getServer( - org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class, - clientNNPbService, socAddr.getHostName(), - socAddr.getPort(), handlerCount, false, conf, - namesystem.getDelegationTokenSecretManager()); + this.clientRpcServer = new RPC.Builder(conf) + .setProtocol( + org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class) + .setInstance(clientNNPbService).setBindAddress(socAddr.getHostName()) + .setPort(socAddr.getPort()).setNumHandlers(handlerCount) + .setVerbose(false) + .setSecretManager(namesystem.getDelegationTokenSecretManager()).build(); DFSUtil.addPBProtocol(conf, HAServiceProtocolPB.class, haPbService, clientRpcServer); DFSUtil.addPBProtocol(conf, NamenodeProtocolPB.class, NNPbService, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java index e54b8bccc2..1eaa0caea3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java @@ -80,9 +80,11 @@ public void testDelegationTokenRpc() throws Exception { DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT, 3600000, mockNameSys); sm.startThreads(); - final Server server = RPC.getServer(ClientProtocol.class, mockNN, ADDRESS, - 0, 5, true, conf, sm); - + final Server server = new RPC.Builder(conf) + .setProtocol(ClientProtocol.class).setInstance(mockNN) + .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true) + .setSecretManager(sm).build(); + server.start(); final UserGroupInformation current = UserGroupInformation.getCurrentUser(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java index a9b5562188..f32b70dc3d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java @@ -231,8 +231,9 @@ private Server createMockDatanode(BlockTokenSecretManager sm, ProtobufRpcEngine.class); BlockingService service = ClientDatanodeProtocolService .newReflectiveBlockingService(mockDN); - return RPC.getServer(ClientDatanodeProtocolPB.class, 
service, ADDRESS, 0, 5, - true, conf, sm); + return new RPC.Builder(conf).setProtocol(ClientDatanodeProtocolPB.class) + .setInstance(service).setBindAddress(ADDRESS).setPort(0) + .setNumHandlers(5).setVerbose(true).setSecretManager(sm).build(); } @Test From 3969bcb7c9dcd2fdbd7d5ae085d30f3ad74e0025 Mon Sep 17 00:00:00 2001 From: Uma Maheswara Rao G Date: Sun, 2 Sep 2012 16:20:13 +0000 Subject: [PATCH 51/62] HDFS-3469. start-dfs.sh will start zkfc, but stop-dfs.sh will not stop zkfc similarly. Contributed by Vinay git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1380003 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hadoop-hdfs/src/main/bin/stop-dfs.sh | 10 ++++++++++ 2 files changed, 13 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index a0255edf34..65d82a4443 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -711,6 +711,9 @@ Branch-2 ( Unreleased changes ) HDFS-3466. Get HTTP kerberos principal from the web authentication keytab. (omalley) + + HDFS-3469. start-dfs.sh will start zkfc, but stop-dfs.sh will not stop zkfc similarly. + (Vinay via umamahesh) BREAKDOWN OF HDFS-3042 SUBTASKS diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh index dff41526f9..930b5fbd7a 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh @@ -61,4 +61,14 @@ if [ -n "$SECONDARY_NAMENODES" ]; then --script "$bin/hdfs" stop secondarynamenode fi +#--------------------------------------------------------- +# ZK Failover controllers, if auto-HA is enabled +AUTOHA_ENABLED=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.ha.automatic-failover.enabled) +if [ "$(echo "$AUTOHA_ENABLED" | tr A-Z a-z)" = "true" ]; then + echo "Stopping ZK Failover Controllers on NN hosts [$NAMENODES]" + "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \ + --config "$HADOOP_CONF_DIR" \ + --hostnames "$NAMENODES" \ + --script "$bin/hdfs" stop zkfc +fi # eof From ab986d7cf6a4ae889819c512873ae3410a45cf04 Mon Sep 17 00:00:00 2001 From: Konstantin Shvachko Date: Mon, 3 Sep 2012 18:54:27 +0000 Subject: [PATCH 52/62] MAPREDUCE-2786. Add compression option for TestDFSIO. Contributed by Plamen Jeliazkov. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1380310 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../org/apache/hadoop/fs/IOMapperBase.java | 19 +++++++++++++++++++ .../java/org/apache/hadoop/fs/TestDFSIO.java | 18 +++++++++++++++++- 3 files changed, 39 insertions(+), 1 deletion(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 501970f098..c2633ea9be 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -147,6 +147,9 @@ Branch-2 ( Unreleased changes ) MAPREDUCE-4408. allow jobs to set a JAR that is in the distributed cached (rkanter via tucu) + MAPREDUCE-2786. Add compression option for TestDFSIO. + (Plamen Jeliazkov via shv) + BUG FIXES MAPREDUCE-4422. 
YARN_APPLICATION_CLASSPATH needs a documented default value in diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/IOMapperBase.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/IOMapperBase.java index 69741f8fa3..fe1af6afcd 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/IOMapperBase.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/IOMapperBase.java @@ -22,7 +22,9 @@ import org.apache.hadoop.conf.Configured; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.mapred.*; +import org.apache.hadoop.util.ReflectionUtils; /** * Base mapper class for IO operations. @@ -41,6 +43,7 @@ public abstract class IOMapperBase extends Configured protected int bufferSize; protected FileSystem fs; protected String hostName; + protected CompressionCodec compressionCodec; public IOMapperBase() { } @@ -59,6 +62,22 @@ public void configure(JobConf conf) { } catch(Exception e) { hostName = "localhost"; } + + //grab compression + String compression = getConf().get("test.io.compression.class", null); + Class codec; + + //try to initialize codec + try { + codec = (compression == null) ? null : + Class.forName(compression).asSubclass(CompressionCodec.class); + } catch(Exception e) { + throw new RuntimeException("Compression codec not found: ", e); + } + + if(codec != null) { + compressionCodec = (CompressionCodec) ReflectionUtils.newInstance(codec, getConf()); + } } public void close() throws IOException { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java index 896240eed0..0d589ff59f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java @@ -23,6 +23,7 @@ import java.io.File; import java.io.FileOutputStream; import java.io.IOException; +import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; import java.io.PrintStream; @@ -295,6 +296,8 @@ public Long doIO(Reporter reporter, // create file OutputStream out; out = fs.create(new Path(getDataDir(getConf()), name), true, bufferSize); + + if(compressionCodec != null) out = compressionCodec.createOutputStream(out); try { // write to the file @@ -358,6 +361,8 @@ public Long doIO(Reporter reporter, OutputStream out; out = fs.append(new Path(getDataDir(getConf()), name), bufferSize); + if(compressionCodec != null) out = compressionCodec.createOutputStream(out); + try { // write to the file long nrRemaining; @@ -394,7 +399,10 @@ public Long doIO(Reporter reporter, long totalSize // in bytes ) throws IOException { // open file - DataInputStream in = fs.open(new Path(getDataDir(getConf()), name)); + InputStream in = fs.open(new Path(getDataDir(getConf()), name)); + + if(compressionCodec != null) in = compressionCodec.createInputStream(in); + long actualSize = 0; try { while 
(actualSize < totalSize) { @@ -459,6 +467,7 @@ public int run(String[] args) throws IOException { long fileSize = 1*MEGA; int nrFiles = 1; String resFileName = DEFAULT_RES_FILE_NAME; + String compressionClass = null; boolean isSequential = false; String version = TestDFSIO.class.getSimpleName() + ".0.0.6"; @@ -479,6 +488,8 @@ public int run(String[] args) throws IOException { testType = TEST_TYPE_CLEANUP; } else if (args[i].startsWith("-seq")) { isSequential = true; + } else if (args[i].startsWith("-compression")) { + compressionClass = args[++i]; } else if (args[i].equals("-nrFiles")) { nrFiles = Integer.parseInt(args[++i]); } else if (args[i].equals("-fileSize")) { @@ -497,6 +508,11 @@ public int run(String[] args) throws IOException { LOG.info("fileSize (MB) = " + toMB(fileSize)); LOG.info("bufferSize = " + bufferSize); LOG.info("baseDir = " + getBaseDir(config)); + + if(compressionClass != null) { + config.set("test.io.compression.class", compressionClass); + LOG.info("compressionClass = " + compressionClass); + } config.setInt("test.io.file.buffer.size", bufferSize); config.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true); From b1632b4a7771d073c87fbcf973cb4a1c530fe2a1 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Tue, 4 Sep 2012 20:56:53 +0000 Subject: [PATCH 53/62] YARN-10. Fix DistributedShell module to not have a dependency on hadoop-mapreduce-client-core. Contributed by Hitesh Shah. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1380879 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 11 +++++++++++ .../hadoop-yarn-applications-distributedshell/pom.xml | 5 ----- .../distributedshell/TestDistributedShell.java | 1 - 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 6b0c12f116..54c9c8799e 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -8,6 +8,8 @@ Trunk (unreleased changes) IMPROVEMENTS + OPTIMAZATIONS + BUG FIXES Branch-2 ( Unreleased changes ) @@ -18,6 +20,8 @@ Branch-2 ( Unreleased changes ) IMPROVEMENTS + OPTIMAZATIONS + BUG FIXES MAPREDUCE-2374. "Text File Busy" errors launching MR tasks. (Andy Isaacson @@ -35,6 +39,11 @@ Release 2.1.0-alpha - Unreleased YARN-29. Add a yarn-client module. (Vinod Kumar Vavilapalli via sseth) + YARN-10. Fix DistributedShell module to not have a dependency on + hadoop-mapreduce-client-core. (Hitesh Shah via vinodkv) + + OPTIMAZATIONS + BUG FIXES YARN-12. Fix findbugs warnings in FairScheduler. (Junping Du via acmurthy) @@ -53,6 +62,8 @@ Release 0.23.3 - Unreleased IMPROVEMENTS + OPTIMAZATIONS + BUG FIXES YARN-14. 
Symlinks to peer distributed cache files no longer work diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml index d29edc1be2..c6300504f9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml @@ -60,11 +60,6 @@ hadoop-yarn-server-common test - - org.apache.hadoop - hadoop-mapreduce-client-core - test - org.apache.hadoop hadoop-yarn-server-tests diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java index 68b1d0a1e6..e76e2db9f0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java @@ -28,7 +28,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.util.JarFinder; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.MiniYARNCluster; From f5943c1eef879b0181df24d056f76dcb4031be5f Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Tue, 4 Sep 2012 21:45:33 +0000 Subject: [PATCH 54/62] Updated CHANGES.txt for 0.23.4 git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1380906 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 12 ++++++++++++ hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 12 ++++++++++++ hadoop-mapreduce-project/CHANGES.txt | 12 ++++++++++++ hadoop-yarn-project/CHANGES.txt | 12 ++++++++++++ 4 files changed, 48 insertions(+) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 61c44f3eba..bd1339e6e0 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -865,6 +865,18 @@ Release 2.0.0-alpha - 05-23-2012 HADOOP-8655. Fix TextInputFormat for large deliminators. (Gelesh via bobby) +Release 0.23.4 - UNRELEASED + + INCOMPATIBLE CHANGES + + NEW FEATURES + + IMPROVEMENTS + + OPTIMIZATIONS + + BUG FIXES + Release 0.23.3 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 65d82a4443..2cc7ffaa58 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -1544,6 +1544,18 @@ Release 2.0.0-alpha - 05-23-2012 HDFS-3039. Address findbugs and javadoc warnings on branch. 
(todd via atm) +Release 0.23.4 - UNRELEASED + + INCOMPATIBLE CHANGES + + NEW FEATURES + + IMPROVEMENTS + + OPTIMIZATIONS + + BUG FIXES + Release 0.23.3 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index c2633ea9be..a56eaf7a87 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -509,6 +509,18 @@ Release 2.0.0-alpha - 05-23-2012 MAPREDUCE-4444. nodemanager fails to start when one of the local-dirs is bad (Jason Lowe via bobby) +Release 0.23.4 - UNRELEASED + + INCOMPATIBLE CHANGES + + NEW FEATURES + + IMPROVEMENTS + + OPTIMIZATIONS + + BUG FIXES + Release 0.23.3 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 54c9c8799e..fff59ec272 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -54,6 +54,18 @@ Release 2.1.0-alpha - Unreleased YARN-37. Change TestRMAppTransitions to use the DrainDispatcher. (Mayank Bansal via sseth) +Release 0.23.4 - UNRELEASED + + INCOMPATIBLE CHANGES + + NEW FEATURES + + IMPROVEMENTS + + OPTIMIZATIONS + + BUG FIXES + Release 0.23.3 - Unreleased INCOMPATIBLE CHANGES From 60af999c738dfc921d90bbde303081c9f1c88941 Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Tue, 4 Sep 2012 22:24:04 +0000 Subject: [PATCH 55/62] YARN-84. Use Builder to build RPC server. Contributed by Brandon Li git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1380921 13f79535-47bb-0310-9956-ffa450edef68 --- .../security/TestUmbilicalProtocolWithJobToken.java | 6 ++++-- hadoop-yarn-project/CHANGES.txt | 2 ++ .../yarn/factories/impl/pb/RpcServerFactoryPBImpl.java | 8 +++++--- .../hadoop/yarn/server/nodemanager/TestNMAuditLogger.java | 6 ++++-- .../yarn/server/resourcemanager/TestRMAuditLogger.java | 5 +++-- 5 files changed, 18 insertions(+), 9 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java index 8167102ab8..4736d1935b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java @@ -88,8 +88,10 @@ public void testJobTokenRpc() throws Exception { .when(mockTT).getProtocolSignature(anyString(), anyLong(), anyInt()); JobTokenSecretManager sm = new JobTokenSecretManager(); - final Server server = RPC.getServer(TaskUmbilicalProtocol.class, mockTT, - ADDRESS, 0, 5, true, conf, sm); + final Server server = new RPC.Builder(conf) + .setProtocol(TaskUmbilicalProtocol.class).setInstance(mockTT) + .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true) + .setSecretManager(sm).build(); server.start(); diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index fff59ec272..4adcdf60f6 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -8,6 +8,8 @@ Trunk (unreleased changes) IMPROVEMENTS + YARN-84. Use Builder to build RPC server. 
(Brandon Li via suresh) + OPTIMAZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java index 075ae572bc..21daf01959 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java @@ -166,9 +166,11 @@ private Server createServer(Class pbProtocol, InetSocketAddress addr, Configu SecretManager secretManager, int numHandlers, BlockingService blockingService, String portRangeConfig) throws IOException { RPC.setProtocolEngine(conf, pbProtocol, ProtobufRpcEngine.class); - RPC.Server server = RPC.getServer(pbProtocol, blockingService, - addr.getHostName(), addr.getPort(), numHandlers, false, conf, - secretManager, portRangeConfig); + RPC.Server server = new RPC.Builder(conf).setProtocol(pbProtocol) + .setInstance(blockingService).setBindAddress(addr.getHostName()) + .setPort(addr.getPort()).setNumHandlers(numHandlers).setVerbose(false) + .setSecretManager(secretManager).setPortRangeConfig(portRangeConfig) + .build(); LOG.info("Adding protocol "+pbProtocol.getCanonicalName()+" to the server"); server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, pbProtocol, blockingService); return server; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java index d33d73a8a9..abaeada60b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java @@ -204,8 +204,10 @@ public void ping() { public void testNMAuditLoggerWithIP() throws Exception { Configuration conf = new Configuration(); // start the IPC server - Server server = RPC.getServer(TestProtocol.class, - new MyTestRPCServer(), "0.0.0.0", 0, 5, true, conf, null); + Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class) + .setInstance(new MyTestRPCServer()).setBindAddress("0.0.0.0") + .setPort(0).setNumHandlers(5).setVerbose(true).build(); + server.start(); InetSocketAddress addr = NetUtils.getConnectAddress(server); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java index b717ceeeb8..116d604d46 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java @@ -222,8 +222,9 @@ public void ping() { public void testRMAuditLoggerWithIP() throws Exception { Configuration conf = new Configuration(); // start the IPC server - Server server = RPC.getServer(TestProtocol.class, - new MyTestRPCServer(), "0.0.0.0", 0, 5, true, conf, null); + Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class) + .setInstance(new MyTestRPCServer()).setBindAddress("0.0.0.0") + .setPort(0).setNumHandlers(5).setVerbose(true).build(); server.start(); InetSocketAddress addr = NetUtils.getConnectAddress(server); From 4244c1443c2c287d479961a89c9c7684a5b47137 Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Tue, 4 Sep 2012 22:55:40 +0000 Subject: [PATCH 56/62] HDFS-3866. HttpFS POM should have property where to download tomcat from (zero45 via tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1380927 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml | 3 ++- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml index 661f5ef1ee..6588c4033b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml @@ -45,6 +45,7 @@ LOCALHOST **/TestHttpFSWithKerberos.java + http://archive.apache.org/dist/tomcat/tomcat-6/v${tomcat.version}/bin/apache-tomcat-${tomcat.version}.tar.gz @@ -527,7 +528,7 @@ diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 2cc7ffaa58..1abbf22d42 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -458,6 +458,8 @@ Branch-2 ( Unreleased changes ) HDFS-2421. Improve the concurrency of SerialNumberMap in NameNode. (Jing Zhao and Weiyan Wang via szetszwo) + HDFS-3866. HttpFS POM should have property where to download tomcat from (zero45 via tucu) + BUG FIXES HDFS-3385. The last block of INodeFileUnderConstruction is not From f3927595cc516381b1ae568e2d883a1d89993cbb Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Tue, 4 Sep 2012 23:48:01 +0000 Subject: [PATCH 57/62] HDFS-3887. Remove redundant chooseTarget methods in BlockPlacementPolicy. Contributed by Jing Zhao git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1380934 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../server/blockmanagement/BlockManager.java | 5 +- .../blockmanagement/BlockPlacementPolicy.java | 62 +-------------- .../web/resources/NamenodeWebHdfsMethods.java | 6 +- .../TestReplicationPolicy.java | 76 +++++++++--------- .../TestReplicationPolicyWithNodeGroup.java | 78 +++++++++---------- 6 files changed, 88 insertions(+), 142 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 1abbf22d42..fac4f44038 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -442,6 +442,9 @@ Branch-2 ( Unreleased changes ) HDFS-3871. Change NameNodeProxies to use RetryUtils. (Arun C Murthy via szetszwo) + HDFS-3887. Remove redundant chooseTarget methods in BlockPlacementPolicy. + (Jing Zhao via szetszwo) + OPTIMIZATIONS HDFS-2982. 
Startup performance suffers when there are many edit log diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 01ee2a1222..7215aa5582 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -1315,8 +1315,9 @@ public DatanodeDescriptor[] chooseTarget(final String src, final HashMap excludedNodes, final long blocksize) throws IOException { // choose targets for the new block to be allocated. - final DatanodeDescriptor targets[] = blockplacement.chooseTarget( - src, numOfReplicas, client, excludedNodes, blocksize); + final DatanodeDescriptor targets[] = blockplacement.chooseTarget(src, + numOfReplicas, client, new ArrayList(), false, + excludedNodes, blocksize); if (targets.length < minReplication) { throw new IOException("File " + src + " could only be replicated to " + targets.length + " nodes instead of minReplication (=" diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java index e3317467bd..4243bcdc65 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java @@ -70,21 +70,6 @@ abstract DatanodeDescriptor[] chooseTarget(String srcPath, List chosenNodes, long blocksize); - /** - * Same as - * {{@link #chooseTarget(String, int, DatanodeDescriptor, List, boolean, HashMap, long)} - * with returnChosenNodes equal to false. - */ - final DatanodeDescriptor[] chooseTarget(String srcPath, - int numOfReplicas, - DatanodeDescriptor writer, - List chosenNodes, - HashMap excludedNodes, - long blocksize) { - return chooseTarget(srcPath, numOfReplicas, writer, chosenNodes, false, - excludedNodes, blocksize); - } - /** * choose numOfReplicas data nodes for writer * to re-replicate a block with size blocksize @@ -131,7 +116,7 @@ DatanodeDescriptor[] chooseTarget(BlockCollection srcBC, HashMap excludedNodes, long blocksize) { return chooseTarget(srcBC.getName(), numOfReplicas, writer, - chosenNodes, excludedNodes, blocksize); + chosenNodes, false, excludedNodes, blocksize); } /** @@ -198,51 +183,6 @@ public static BlockPlacementPolicy getInstance(Configuration conf, replicator.initialize(conf, stats, clusterMap); return replicator; } - - /** - * choose numOfReplicas nodes for writer to replicate - * a block with size blocksize - * If not, return as many as we can. - * - * @param srcPath a string representation of the file for which chooseTarget is invoked - * @param numOfReplicas number of replicas wanted. - * @param writer the writer's machine, null if not in the cluster. - * @param blocksize size of the data to be written. - * @return array of DatanodeDescriptor instances chosen as targets - * and sorted as a pipeline. 
- */ - DatanodeDescriptor[] chooseTarget(String srcPath, - int numOfReplicas, - DatanodeDescriptor writer, - long blocksize) { - return chooseTarget(srcPath, numOfReplicas, writer, - new ArrayList(), - blocksize); - } - - /** - * choose numOfReplicas nodes for writer to replicate - * a block with size blocksize - * If not, return as many as we can. - * - * @param srcPath a string representation of the file for which chooseTarget is invoked - * @param numOfReplicas number of replicas wanted. - * @param writer the writer's machine, null if not in the cluster. - * @param blocksize size of the data to be written. - * @param excludedNodes datanodes that should not be considered as targets. - * @return array of DatanodeDescriptor instances chosen as targets - * and sorted as a pipeline. - */ - public DatanodeDescriptor[] chooseTarget(String srcPath, - int numOfReplicas, - DatanodeDescriptor writer, - HashMap excludedNodes, - long blocksize) { - return chooseTarget(srcPath, numOfReplicas, writer, - new ArrayList(), - excludedNodes, - blocksize); - } /** * Adjust rackmap, moreThanOne, and exactlyOne after removing replica on cur. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java index 0ee13519f6..912dee1037 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java @@ -25,6 +25,7 @@ import java.net.URI; import java.net.URISyntaxException; import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; import java.util.EnumSet; import javax.servlet.ServletContext; @@ -163,8 +164,9 @@ static DatanodeInfo chooseDatanode(final NameNode namenode, final DatanodeDescriptor clientNode = bm.getDatanodeManager( ).getDatanodeByHost(getRemoteAddress()); if (clientNode != null) { - final DatanodeDescriptor[] datanodes = bm.getBlockPlacementPolicy( - ).chooseTarget(path, 1, clientNode, null, blocksize); + final DatanodeDescriptor[] datanodes = bm.getBlockPlacementPolicy() + .chooseTarget(path, 1, clientNode, + new ArrayList(), false, null, blocksize); if (datanodes.length > 0) { return datanodes[0]; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index ada74ce7fc..4d7356eb4f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -111,30 +111,30 @@ public void testChooseTarget1() throws Exception { HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 4, 0); // overloaded DatanodeDescriptor[] targets; - targets = replicator.chooseTarget(filename, - 0, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 0, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 0); - targets = replicator.chooseTarget(filename, - 1, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 1, dataNodes[0], + new ArrayList(), BLOCK_SIZE); 
assertEquals(targets.length, 1); assertEquals(targets[0], dataNodes[0]); targets = replicator.chooseTarget(filename, - 2, dataNodes[0], BLOCK_SIZE); + 2, dataNodes[0], new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 2); assertEquals(targets[0], dataNodes[0]); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); - targets = replicator.chooseTarget(filename, - 3, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 3, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 3); assertEquals(targets[0], dataNodes[0]); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); assertTrue(cluster.isOnSameRack(targets[1], targets[2])); - targets = replicator.chooseTarget(filename, - 4, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 4, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 4); assertEquals(targets[0], dataNodes[0]); assertTrue(cluster.isOnSameRack(targets[1], targets[2]) || @@ -249,30 +249,30 @@ public void testChooseTarget3() throws Exception { (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0); // no space DatanodeDescriptor[] targets; - targets = replicator.chooseTarget(filename, - 0, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 0, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 0); - targets = replicator.chooseTarget(filename, - 1, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 1, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 1); assertEquals(targets[0], dataNodes[1]); - targets = replicator.chooseTarget(filename, - 2, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 2, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 2); assertEquals(targets[0], dataNodes[1]); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); - targets = replicator.chooseTarget(filename, - 3, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 3, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 3); assertEquals(targets[0], dataNodes[1]); assertTrue(cluster.isOnSameRack(targets[1], targets[2])); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); - targets = replicator.chooseTarget(filename, - 4, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 4, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 4); assertEquals(targets[0], dataNodes[1]); for(int i=1; i<4; i++) { @@ -305,23 +305,23 @@ public void testChoooseTarget4() throws Exception { } DatanodeDescriptor[] targets; - targets = replicator.chooseTarget(filename, - 0, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 0, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 0); - targets = replicator.chooseTarget(filename, - 1, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 1, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 1); assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0])); - targets = replicator.chooseTarget(filename, - 2, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 2, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 2); assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0])); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); - targets = replicator.chooseTarget(filename, - 3, dataNodes[0], BLOCK_SIZE); + 
targets = replicator.chooseTarget(filename, 3, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 3); for(int i=0; i<3; i++) { assertFalse(cluster.isOnSameRack(targets[i], dataNodes[0])); @@ -350,21 +350,21 @@ public void testChooseTarget5() throws Exception { DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r4"); DatanodeDescriptor[] targets; - targets = replicator.chooseTarget(filename, - 0, writerDesc, BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 0, writerDesc, + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 0); - - targets = replicator.chooseTarget(filename, - 1, writerDesc, BLOCK_SIZE); + + targets = replicator.chooseTarget(filename, 1, writerDesc, + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 1); - - targets = replicator.chooseTarget(filename, - 2, writerDesc, BLOCK_SIZE); + + targets = replicator.chooseTarget(filename, 2, writerDesc, + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 2); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); - - targets = replicator.chooseTarget(filename, - 3, writerDesc, BLOCK_SIZE); + + targets = replicator.chooseTarget(filename, 3, writerDesc, + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 3); assertTrue(cluster.isOnSameRack(targets[1], targets[2])); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java index 5fbd44d5e1..d8efd3a029 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java @@ -114,31 +114,31 @@ public void testChooseTarget1() throws Exception { HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 4, 0); // overloaded DatanodeDescriptor[] targets; - targets = replicator.chooseTarget(filename, - 0, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 0, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 0); - targets = replicator.chooseTarget(filename, - 1, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 1, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 1); assertEquals(targets[0], dataNodes[0]); - targets = replicator.chooseTarget(filename, - 2, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 2, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 2); assertEquals(targets[0], dataNodes[0]); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); - targets = replicator.chooseTarget(filename, - 3, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 3, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 3); assertEquals(targets[0], dataNodes[0]); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); assertTrue(cluster.isOnSameRack(targets[1], targets[2])); assertFalse(cluster.isOnSameNodeGroup(targets[1], targets[2])); - targets = replicator.chooseTarget(filename, - 4, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 4, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 4); assertEquals(targets[0], 
dataNodes[0]); assertTrue(cluster.isOnSameRack(targets[1], targets[2]) || @@ -220,30 +220,30 @@ public void testChooseTarget3() throws Exception { (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0); // no space DatanodeDescriptor[] targets; - targets = replicator.chooseTarget(filename, - 0, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 0, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 0); - targets = replicator.chooseTarget(filename, - 1, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 1, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 1); assertEquals(targets[0], dataNodes[1]); - targets = replicator.chooseTarget(filename, - 2, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 2, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 2); assertEquals(targets[0], dataNodes[1]); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); - targets = replicator.chooseTarget(filename, - 3, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 3, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 3); assertEquals(targets[0], dataNodes[1]); assertTrue(cluster.isOnSameRack(targets[1], targets[2])); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); - targets = replicator.chooseTarget(filename, - 4, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 4, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 4); assertEquals(targets[0], dataNodes[1]); assertTrue(cluster.isNodeGroupAware()); @@ -275,23 +275,23 @@ public void testChooseTarget4() throws Exception { } DatanodeDescriptor[] targets; - targets = replicator.chooseTarget(filename, - 0, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 0, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 0); - targets = replicator.chooseTarget(filename, - 1, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 1, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 1); assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0])); - targets = replicator.chooseTarget(filename, - 2, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 2, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 2); assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0])); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); - targets = replicator.chooseTarget(filename, - 3, dataNodes[0], BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 3, dataNodes[0], + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 3); for(int i=0; i<3; i++) { assertFalse(cluster.isOnSameRack(targets[i], dataNodes[0])); @@ -313,21 +313,21 @@ public void testChooseTarget4() throws Exception { public void testChooseTarget5() throws Exception { setupDataNodeCapacity(); DatanodeDescriptor[] targets; - targets = replicator.chooseTarget(filename, - 0, NODE, BLOCK_SIZE); + targets = replicator.chooseTarget(filename, 0, NODE, + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 0); - - targets = replicator.chooseTarget(filename, - 1, NODE, BLOCK_SIZE); + + targets = replicator.chooseTarget(filename, 1, NODE, + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 1); - - targets = replicator.chooseTarget(filename, - 2, NODE, BLOCK_SIZE); + + targets = replicator.chooseTarget(filename, 2, NODE, + new 
ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 2); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); - - targets = replicator.chooseTarget(filename, - 3, NODE, BLOCK_SIZE); + + targets = replicator.chooseTarget(filename, 3, NODE, + new ArrayList(), BLOCK_SIZE); assertEquals(targets.length, 3); assertTrue(cluster.isOnSameRack(targets[1], targets[2])); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); From 658e7b5818fe38419b3192a801d14e82864ea4ae Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Tue, 4 Sep 2012 23:56:10 +0000 Subject: [PATCH 58/62] HDFS-3888. Clean up BlockPlacementPolicyDefault. Contributed by Jing Zhao git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1380939 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../BlockPlacementPolicyDefault.java | 34 ++++++++++++------- 2 files changed, 24 insertions(+), 12 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index fac4f44038..6994dfc6f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -445,6 +445,8 @@ Branch-2 ( Unreleased changes ) HDFS-3887. Remove redundant chooseTarget methods in BlockPlacementPolicy. (Jing Zhao via szetszwo) + HDFS-3888. Clean up BlockPlacementPolicyDefault. (Jing Zhao via szetszwo) + OPTIMIZATIONS HDFS-2982. Startup performance suffers when there are many edit log diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java index 6096118cde..bc396539c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java @@ -27,8 +27,6 @@ import java.util.Set; import java.util.TreeSet; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -55,9 +53,6 @@ @InterfaceAudience.Private public class BlockPlacementPolicyDefault extends BlockPlacementPolicy { - private static final Log LOG = - LogFactory.getLog(BlockPlacementPolicyDefault.class.getName()); - private static final String enableDebugLogging = "For more information, please enable DEBUG log level on " + LOG.getClass().getName(); @@ -124,7 +119,6 @@ public DatanodeDescriptor[] chooseTarget(String srcPath, excludedNodes, blocksize); } - /** This is the implementation. 
*/ DatanodeDescriptor[] chooseTarget(int numOfReplicas, DatanodeDescriptor writer, @@ -162,7 +156,8 @@ DatanodeDescriptor[] chooseTarget(int numOfReplicas, } DatanodeDescriptor localNode = chooseTarget(numOfReplicas, writer, - excludedNodes, blocksize, maxNodesPerRack, results); + excludedNodes, blocksize, + maxNodesPerRack, results); if (!returnChosenNodes) { results.removeAll(chosenNodes); } @@ -455,14 +450,29 @@ protected void adjustExcludedNodes(HashMap excludedNodes, * does not have too much load, and the rack does not have too many nodes */ private boolean isGoodTarget(DatanodeDescriptor node, - long blockSize, int maxTargetPerLoc, + long blockSize, int maxTargetPerRack, List results) { - return isGoodTarget(node, blockSize, maxTargetPerLoc, + return isGoodTarget(node, blockSize, maxTargetPerRack, this.considerLoad, results); } - + + /** + * Determine if a node is a good target. + * + * @param node The target node + * @param blockSize Size of block + * @param maxTargetPerRack Maximum number of targets per rack. The value of + * this parameter depends on the number of racks in + * the cluster and total number of replicas for a block + * @param considerLoad whether or not to consider load of the target node + * @param results A list containing currently chosen nodes. Used to check if + * too many nodes has been chosen in the target rack. + * @return Return true if node has enough space, + * does not have too much load, + * and the rack does not have too many nodes. + */ protected boolean isGoodTarget(DatanodeDescriptor node, - long blockSize, int maxTargetPerLoc, + long blockSize, int maxTargetPerRack, boolean considerLoad, List results) { // check if the node is (being) decommissed @@ -514,7 +524,7 @@ protected boolean isGoodTarget(DatanodeDescriptor node, counter++; } } - if (counter>maxTargetPerLoc) { + if (counter>maxTargetPerRack) { if(LOG.isDebugEnabled()) { threadLocalBuilder.get().append(node.toString()).append(": ") .append("Node ").append(NodeBase.getPath(node)) From ab74b1addeaf36359f4dd300471e2b185792bcd5 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Wed, 5 Sep 2012 00:14:35 +0000 Subject: [PATCH 59/62] YARN-79. Implement close on all clients to YARN so that RPC clients don't throw exceptions on shut-down. Contributed by Vinod Kumar Vavilapalli. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1380942 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + .../hadoop-yarn/hadoop-yarn-client/pom.xml | 5 + .../hadoop/yarn/client/TestYarnClient.java | 14 +++ .../pb/client/AMRMProtocolPBClientImpl.java | 33 +++--- .../client/ClientRMProtocolPBClientImpl.java | 101 +++++++++++------- .../client/ContainerManagerPBClientImpl.java | 38 ++++--- 6 files changed, 127 insertions(+), 67 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 4adcdf60f6..4cafe99921 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -56,6 +56,9 @@ Release 2.1.0-alpha - Unreleased YARN-37. Change TestRMAppTransitions to use the DrainDispatcher. (Mayank Bansal via sseth) + YARN-79. Implement close on all clients to YARN so that RPC clients don't + throw exceptions on shut-down. 
(Vinod Kumar Vavilapalli) + Release 0.23.4 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml index 2646f6d0dc..45fbc0070c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml @@ -33,5 +33,10 @@ org.apache.hadoop hadoop-yarn-common + + org.apache.hadoop + hadoop-yarn-server-resourcemanager + test + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/hadoop/yarn/client/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/hadoop/yarn/client/TestYarnClient.java index 58737da1f7..d0fc31f454 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/hadoop/yarn/client/TestYarnClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/hadoop/yarn/client/TestYarnClient.java @@ -18,6 +18,8 @@ package org.hadoop.yarn.client; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.junit.Test; public class TestYarnClient { @@ -27,4 +29,16 @@ public void test() { // More to come later. } + @Test + public void testClientStop() { + Configuration conf = new Configuration(); + ResourceManager rm = new ResourceManager(null); + rm.init(conf); + rm.start(); + + YarnClient client = new YarnClientImpl(); + client.init(conf); + client.start(); + client.stop(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/AMRMProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/AMRMProtocolPBClientImpl.java index 2931430048..382d913eef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/AMRMProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/AMRMProtocolPBClientImpl.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.api.impl.pb.client; +import java.io.Closeable; import java.io.IOException; import java.net.InetSocketAddress; @@ -46,16 +47,19 @@ import com.google.protobuf.ServiceException; -public class AMRMProtocolPBClientImpl implements AMRMProtocol { +public class AMRMProtocolPBClientImpl implements AMRMProtocol, Closeable { private AMRMProtocolPB proxy; - - public AMRMProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { + + public AMRMProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, + Configuration conf) throws IOException { RPC.setProtocolEngine(conf, AMRMProtocolPB.class, ProtobufRpcEngine.class); - proxy = (AMRMProtocolPB)RPC.getProxy( - AMRMProtocolPB.class, clientVersion, addr, conf); + proxy = + (AMRMProtocolPB) RPC.getProxy(AMRMProtocolPB.class, clientVersion, + addr, conf); } - + + @Override public void close() { if (this.proxy != null) { RPC.stopProxy(this.proxy); @@ -65,7 +69,8 @@ public void close() { @Override public AllocateResponse allocate(AllocateRequest request) throws YarnRemoteException { - AllocateRequestProto requestProto = ((AllocateRequestPBImpl)request).getProto(); + AllocateRequestProto requestProto = + ((AllocateRequestPBImpl) request).getProto(); try { return new AllocateResponsePBImpl(proxy.allocate(null, requestProto)); } catch 
(ServiceException e) { @@ -73,14 +78,14 @@ public AllocateResponse allocate(AllocateRequest request) } } - - @Override public FinishApplicationMasterResponse finishApplicationMaster( FinishApplicationMasterRequest request) throws YarnRemoteException { - FinishApplicationMasterRequestProto requestProto = ((FinishApplicationMasterRequestPBImpl)request).getProto(); + FinishApplicationMasterRequestProto requestProto = + ((FinishApplicationMasterRequestPBImpl) request).getProto(); try { - return new FinishApplicationMasterResponsePBImpl(proxy.finishApplicationMaster(null, requestProto)); + return new FinishApplicationMasterResponsePBImpl( + proxy.finishApplicationMaster(null, requestProto)); } catch (ServiceException e) { throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } @@ -89,9 +94,11 @@ public FinishApplicationMasterResponse finishApplicationMaster( @Override public RegisterApplicationMasterResponse registerApplicationMaster( RegisterApplicationMasterRequest request) throws YarnRemoteException { - RegisterApplicationMasterRequestProto requestProto = ((RegisterApplicationMasterRequestPBImpl)request).getProto(); + RegisterApplicationMasterRequestProto requestProto = + ((RegisterApplicationMasterRequestPBImpl) request).getProto(); try { - return new RegisterApplicationMasterResponsePBImpl(proxy.registerApplicationMaster(null, requestProto)); + return new RegisterApplicationMasterResponsePBImpl( + proxy.registerApplicationMaster(null, requestProto)); } catch (ServiceException e) { throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java index 4167e29b9d..0f2bf7a34f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.api.impl.pb.client; +import java.io.Closeable; import java.io.IOException; import java.net.InetSocketAddress; @@ -81,22 +82,35 @@ import com.google.protobuf.ServiceException; -public class ClientRMProtocolPBClientImpl implements ClientRMProtocol { +public class ClientRMProtocolPBClientImpl implements ClientRMProtocol, + Closeable { private ClientRMProtocolPB proxy; - - public ClientRMProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { - RPC.setProtocolEngine(conf, ClientRMProtocolPB.class, ProtobufRpcEngine.class); - proxy = (ClientRMProtocolPB)RPC.getProxy( - ClientRMProtocolPB.class, clientVersion, addr, conf); + + public ClientRMProtocolPBClientImpl(long clientVersion, + InetSocketAddress addr, Configuration conf) throws IOException { + RPC.setProtocolEngine(conf, ClientRMProtocolPB.class, + ProtobufRpcEngine.class); + proxy = + (ClientRMProtocolPB) RPC.getProxy(ClientRMProtocolPB.class, + clientVersion, addr, conf); } - + + @Override + public void close() { + if (this.proxy != null) { + RPC.stopProxy(this.proxy); + } + } + @Override public KillApplicationResponse forceKillApplication( KillApplicationRequest request) throws YarnRemoteException { - KillApplicationRequestProto requestProto = 
((KillApplicationRequestPBImpl)request).getProto(); + KillApplicationRequestProto requestProto = + ((KillApplicationRequestPBImpl) request).getProto(); try { - return new KillApplicationResponsePBImpl(proxy.forceKillApplication(null, requestProto)); + return new KillApplicationResponsePBImpl(proxy.forceKillApplication(null, + requestProto)); } catch (ServiceException e) { throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } @@ -105,9 +119,11 @@ public KillApplicationResponse forceKillApplication( @Override public GetApplicationReportResponse getApplicationReport( GetApplicationReportRequest request) throws YarnRemoteException { - GetApplicationReportRequestProto requestProto = ((GetApplicationReportRequestPBImpl)request).getProto(); + GetApplicationReportRequestProto requestProto = + ((GetApplicationReportRequestPBImpl) request).getProto(); try { - return new GetApplicationReportResponsePBImpl(proxy.getApplicationReport(null, requestProto)); + return new GetApplicationReportResponsePBImpl(proxy.getApplicationReport( + null, requestProto)); } catch (ServiceException e) { throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } @@ -116,9 +132,11 @@ public GetApplicationReportResponse getApplicationReport( @Override public GetClusterMetricsResponse getClusterMetrics( GetClusterMetricsRequest request) throws YarnRemoteException { - GetClusterMetricsRequestProto requestProto = ((GetClusterMetricsRequestPBImpl)request).getProto(); + GetClusterMetricsRequestProto requestProto = + ((GetClusterMetricsRequestPBImpl) request).getProto(); try { - return new GetClusterMetricsResponsePBImpl(proxy.getClusterMetrics(null, requestProto)); + return new GetClusterMetricsResponsePBImpl(proxy.getClusterMetrics(null, + requestProto)); } catch (ServiceException e) { throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } @@ -127,9 +145,11 @@ public GetClusterMetricsResponse getClusterMetrics( @Override public GetNewApplicationResponse getNewApplication( GetNewApplicationRequest request) throws YarnRemoteException { - GetNewApplicationRequestProto requestProto = ((GetNewApplicationRequestPBImpl)request).getProto(); + GetNewApplicationRequestProto requestProto = + ((GetNewApplicationRequestPBImpl) request).getProto(); try { - return new GetNewApplicationResponsePBImpl(proxy.getNewApplication(null, requestProto)); + return new GetNewApplicationResponsePBImpl(proxy.getNewApplication(null, + requestProto)); } catch (ServiceException e) { throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } @@ -138,9 +158,11 @@ public GetNewApplicationResponse getNewApplication( @Override public SubmitApplicationResponse submitApplication( SubmitApplicationRequest request) throws YarnRemoteException { - SubmitApplicationRequestProto requestProto = ((SubmitApplicationRequestPBImpl)request).getProto(); + SubmitApplicationRequestProto requestProto = + ((SubmitApplicationRequestPBImpl) request).getProto(); try { - return new SubmitApplicationResponsePBImpl(proxy.submitApplication(null, requestProto)); + return new SubmitApplicationResponsePBImpl(proxy.submitApplication(null, + requestProto)); } catch (ServiceException e) { throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } @@ -149,24 +171,25 @@ public SubmitApplicationResponse submitApplication( @Override public GetAllApplicationsResponse getAllApplications( GetAllApplicationsRequest request) throws YarnRemoteException { - GetAllApplicationsRequestProto requestProto = - ((GetAllApplicationsRequestPBImpl)request).getProto(); + 
GetAllApplicationsRequestProto requestProto = + ((GetAllApplicationsRequestPBImpl) request).getProto(); try { - return new GetAllApplicationsResponsePBImpl( - proxy.getAllApplications(null, requestProto)); + return new GetAllApplicationsResponsePBImpl(proxy.getAllApplications( + null, requestProto)); } catch (ServiceException e) { throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @Override - public GetClusterNodesResponse getClusterNodes( - GetClusterNodesRequest request) throws YarnRemoteException { + public GetClusterNodesResponse + getClusterNodes(GetClusterNodesRequest request) + throws YarnRemoteException { GetClusterNodesRequestProto requestProto = - ((GetClusterNodesRequestPBImpl)request).getProto(); + ((GetClusterNodesRequestPBImpl) request).getProto(); try { - return new GetClusterNodesResponsePBImpl( - proxy.getClusterNodes(null, requestProto)); + return new GetClusterNodesResponsePBImpl(proxy.getClusterNodes(null, + requestProto)); } catch (ServiceException e) { throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } @@ -176,10 +199,10 @@ public GetClusterNodesResponse getClusterNodes( public GetQueueInfoResponse getQueueInfo(GetQueueInfoRequest request) throws YarnRemoteException { GetQueueInfoRequestProto requestProto = - ((GetQueueInfoRequestPBImpl)request).getProto(); + ((GetQueueInfoRequestPBImpl) request).getProto(); try { - return new GetQueueInfoResponsePBImpl( - proxy.getQueueInfo(null, requestProto)); + return new GetQueueInfoResponsePBImpl(proxy.getQueueInfo(null, + requestProto)); } catch (ServiceException e) { throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } @@ -189,10 +212,10 @@ public GetQueueInfoResponse getQueueInfo(GetQueueInfoRequest request) public GetQueueUserAclsInfoResponse getQueueUserAcls( GetQueueUserAclsInfoRequest request) throws YarnRemoteException { GetQueueUserAclsInfoRequestProto requestProto = - ((GetQueueUserAclsInfoRequestPBImpl)request).getProto(); + ((GetQueueUserAclsInfoRequestPBImpl) request).getProto(); try { - return new GetQueueUserAclsInfoResponsePBImpl( - proxy.getQueueUserAcls(null, requestProto)); + return new GetQueueUserAclsInfoResponsePBImpl(proxy.getQueueUserAcls( + null, requestProto)); } catch (ServiceException e) { throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } @@ -202,12 +225,12 @@ public GetQueueUserAclsInfoResponse getQueueUserAcls( public GetDelegationTokenResponse getDelegationToken( GetDelegationTokenRequest request) throws YarnRemoteException { GetDelegationTokenRequestProto requestProto = - ((GetDelegationTokenRequestPBImpl)request).getProto(); - try { - return new GetDelegationTokenResponsePBImpl( - proxy.getDelegationToken(null, requestProto)); - } catch (ServiceException e) { - throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); - } + ((GetDelegationTokenRequestPBImpl) request).getProto(); + try { + return new GetDelegationTokenResponsePBImpl(proxy.getDelegationToken( + null, requestProto)); + } catch (ServiceException e) { + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); + } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagerPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagerPBClientImpl.java index 0a96b368fd..cff287a90a 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagerPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagerPBClientImpl.java @@ -18,9 +18,9 @@ package org.apache.hadoop.yarn.api.impl.pb.client; +import java.io.Closeable; import java.io.IOException; import java.net.InetSocketAddress; -import java.io.Closeable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.ProtobufRpcEngine; @@ -58,22 +58,26 @@ public class ContainerManagerPBClientImpl implements ContainerManager, + "rpc.nm-command-timeout"; /** - * Maximum of 1 minute timeout for a Node to react to the command + * Maximum of 1 minute timeout for a Node to react to the command */ static final int DEFAULT_COMMAND_TIMEOUT = 60000; private ContainerManagerPB proxy; - - public ContainerManagerPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { - RPC.setProtocolEngine(conf, ContainerManagerPB.class, ProtobufRpcEngine.class); + + public ContainerManagerPBClientImpl(long clientVersion, + InetSocketAddress addr, Configuration conf) throws IOException { + RPC.setProtocolEngine(conf, ContainerManagerPB.class, + ProtobufRpcEngine.class); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); int expireIntvl = conf.getInt(NM_COMMAND_TIMEOUT, DEFAULT_COMMAND_TIMEOUT); - proxy = (ContainerManagerPB)RPC.getProxy( - ContainerManagerPB.class, clientVersion, addr, ugi, conf, - NetUtils.getDefaultSocketFactory(conf), expireIntvl); + proxy = + (ContainerManagerPB) RPC.getProxy(ContainerManagerPB.class, + clientVersion, addr, ugi, conf, + NetUtils.getDefaultSocketFactory(conf), expireIntvl); } + @Override public void close() { if (this.proxy != null) { RPC.stopProxy(this.proxy); @@ -83,9 +87,11 @@ public void close() { @Override public GetContainerStatusResponse getContainerStatus( GetContainerStatusRequest request) throws YarnRemoteException { - GetContainerStatusRequestProto requestProto = ((GetContainerStatusRequestPBImpl)request).getProto(); + GetContainerStatusRequestProto requestProto = + ((GetContainerStatusRequestPBImpl) request).getProto(); try { - return new GetContainerStatusResponsePBImpl(proxy.getContainerStatus(null, requestProto)); + return new GetContainerStatusResponsePBImpl(proxy.getContainerStatus( + null, requestProto)); } catch (ServiceException e) { throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } @@ -94,9 +100,11 @@ public GetContainerStatusResponse getContainerStatus( @Override public StartContainerResponse startContainer(StartContainerRequest request) throws YarnRemoteException { - StartContainerRequestProto requestProto = ((StartContainerRequestPBImpl)request).getProto(); + StartContainerRequestProto requestProto = + ((StartContainerRequestPBImpl) request).getProto(); try { - return new StartContainerResponsePBImpl(proxy.startContainer(null, requestProto)); + return new StartContainerResponsePBImpl(proxy.startContainer(null, + requestProto)); } catch (ServiceException e) { throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } @@ -105,11 +113,11 @@ public StartContainerResponse startContainer(StartContainerRequest request) @Override public StopContainerResponse stopContainer(StopContainerRequest request) throws YarnRemoteException { - StopContainerRequestProto requestProto = ((StopContainerRequestPBImpl) request) - .getProto(); + StopContainerRequestProto requestProto = + 
((StopContainerRequestPBImpl) request).getProto(); try { return new StopContainerResponsePBImpl(proxy.stopContainer(null, - requestProto)); + requestProto)); } catch (ServiceException e) { throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } From 65b308f7834c0770c7e062def0a67bf9a0e065e8 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Wed, 5 Sep 2012 02:46:07 +0000 Subject: [PATCH 60/62] YARN-42. Modify NM's non-aggregating logs' handler to stop properly so that NMs don't get NPEs on startup errors. Contributed by Devaraj K. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1380954 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 +++ .../loghandler/NonAggregatingLogHandler.java | 22 ++++++++++--------- .../TestNonAggregatingLogHandler.java | 18 +++++++++++++++ 3 files changed, 33 insertions(+), 10 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 4cafe99921..8106a37f4a 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -59,6 +59,9 @@ Release 2.1.0-alpha - Unreleased YARN-79. Implement close on all clients to YARN so that RPC clients don't throw exceptions on shut-down. (Vinod Kumar Vavilapalli) + YARN-42. Modify NM's non-aggregating logs' handler to stop properly so that + NMs don't get NPEs on startup errors. (Devaraj K via vinodkv) + Release 0.23.4 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java index 3d5ad68cfd..7ec634b0eb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java @@ -79,16 +79,18 @@ public void init(Configuration conf) { @Override public void stop() { - sched.shutdown(); - boolean isShutdown = false; - try { - isShutdown = sched.awaitTermination(10, TimeUnit.SECONDS); - } catch (InterruptedException e) { - sched.shutdownNow(); - isShutdown = true; - } - if (!isShutdown) { - sched.shutdownNow(); + if (sched != null) { + sched.shutdown(); + boolean isShutdown = false; + try { + isShutdown = sched.awaitTermination(10, TimeUnit.SECONDS); + } catch (InterruptedException e) { + sched.shutdownNow(); + isShutdown = true; + } + if (!isShutdown) { + sched.shutdownNow(); + } } super.stop(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java index bbee9c5848..36251e47e1 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java @@ -183,6 +183,24 @@ public void testDelayedDelete() { verify(mockSched).schedule(any(Runnable.class), eq(10800l), eq(TimeUnit.SECONDS)); } + + @Test + public void testStop() throws Exception { + NonAggregatingLogHandler aggregatingLogHandler = + new NonAggregatingLogHandler(null, null, null); + + // It should not throw NullPointerException + aggregatingLogHandler.stop(); + + NonAggregatingLogHandlerWithMockExecutor logHandler = + new NonAggregatingLogHandlerWithMockExecutor(null, null, null); + logHandler.init(new Configuration()); + logHandler.stop(); + verify(logHandler.mockSched).shutdown(); + verify(logHandler.mockSched) + .awaitTermination(eq(10l), eq(TimeUnit.SECONDS)); + verify(logHandler.mockSched).shutdownNow(); + } private class NonAggregatingLogHandlerWithMockExecutor extends NonAggregatingLogHandler { From c334cc89a8f42c98ab4dad02ae41c5a02a855974 Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Wed, 5 Sep 2012 04:40:23 +0000 Subject: [PATCH 61/62] HDFS-2793. Add an admin command to trigger an edit log roll. Contributed by Todd Lipcon. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1380982 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../org/apache/hadoop/hdfs/DFSClient.java | 14 ++++++++++ .../hadoop/hdfs/DistributedFileSystem.java | 10 +++++++ .../hadoop/hdfs/protocol/ClientProtocol.java | 12 +++++++++ ...amenodeProtocolServerSideTranslatorPB.java | 16 +++++++++++ .../ClientNamenodeProtocolTranslatorPB.java | 13 +++++++++ .../hdfs/server/namenode/FSNamesystem.java | 1 + .../server/namenode/NameNodeRpcServer.java | 7 +++++ .../apache/hadoop/hdfs/tools/DFSAdmin.java | 27 +++++++++++++++++++ .../main/proto/ClientNamenodeProtocol.proto | 9 +++++++ .../src/test/resources/testHDFSConf.xml | 17 ++++++++++++ 11 files changed, 128 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 6994dfc6f9..42ea8c8394 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -243,6 +243,8 @@ Branch-2 ( Unreleased changes ) HDFS-3150. Add option for clients to contact DNs via hostname. (eli) + HDFS-2793. Add an admin command to trigger an edit log roll. (todd) + IMPROVEMENTS HDFS-3390. DFSAdmin should print full stack traces of errors when DEBUG diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 97a146432d..8e7b9f8321 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -1870,6 +1870,20 @@ void saveNamespace() throws AccessControlException, IOException { throw re.unwrapRemoteException(AccessControlException.class); } } + + /** + * Rolls the edit log on the active NameNode. 
+ * @return the txid of the new log segment + * + * @see ClientProtocol#rollEdits() + */ + long rollEdits() throws AccessControlException, IOException { + try { + return namenode.rollEdits(); + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class); + } + } /** * enable/disable restore failed storage. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index c0e49a201f..3222ef2491 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -624,6 +624,16 @@ public boolean setSafeMode(HdfsConstants.SafeModeAction action) public void saveNamespace() throws AccessControlException, IOException { dfs.saveNamespace(); } + + /** + * Rolls the edit log on the active NameNode. + * Requires super-user privileges. + * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#rollEdits() + * @return the transaction ID of the newly created segment + */ + public long rollEdits() throws AccessControlException, IOException { + return dfs.rollEdits(); + } /** * enable/disable/check restoreFaileStorage diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 86bbe10b98..3d2ea4c250 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -667,6 +667,18 @@ public boolean setSafeMode(HdfsConstants.SafeModeAction action) */ public void saveNamespace() throws AccessControlException, IOException; + + /** + * Roll the edit log. + * Requires superuser privileges. + * + * @throws AccessControlException if the superuser privilege is violated + * @throws IOException if log roll fails + * @return the txid of the new segment + */ + @Idempotent + public long rollEdits() throws AccessControlException, IOException; + /** * Enable/Disable restore failed storage. *

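Between the client-side API above and the protobuf plumbing that follows, it may help to see how the new call is exercised end to end. Below is a minimal sketch, not part of the patch, of a client invoking the rollEdits() method that this change adds to DistributedFileSystem; the NameNode URI and the class name RollEditsExample are illustrative assumptions, and superuser privileges are required as noted in the javadoc.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Hedged sketch: exercises the rollEdits() API added by HDFS-2793.
public class RollEditsExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Illustrative URI; assumes the caller is an HDFS superuser.
    FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // Triggers an edit log roll on the active NameNode and returns the
      // transaction ID at which the new segment starts.
      long txid = dfs.rollEdits();
      System.out.println("New edit log segment starts at txid " + txid);
    }
    fs.close();
  }
}

The same operation is exposed on the command line by the DFSAdmin change later in this patch as "hadoop dfsadmin -rollEdits", which prints the starting txid of the newly created segment.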
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 37ab28505c..1cbb1c08c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -103,6 +103,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto; @@ -537,6 +539,20 @@ public SaveNamespaceResponseProto saveNamespace(RpcController controller, } } + + @Override + public RollEditsResponseProto rollEdits(RpcController controller, + RollEditsRequestProto request) throws ServiceException { + try { + long txid = server.rollEdits(); + return RollEditsResponseProto.newBuilder() + .setNewSegmentTxId(txid) + .build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + static final RefreshNodesResponseProto VOID_REFRESHNODES_RESPONSE = RefreshNodesResponseProto.newBuilder().build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 5626f038ac..02f4bb2b37 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -87,6 +87,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto; @@ -525,6 +527,17 @@ public void saveNamespace() throws AccessControlException, IOException { throw ProtobufHelper.getRemoteException(e); } } + + @Override + public long rollEdits() throws 
AccessControlException, IOException { + RollEditsRequestProto req = RollEditsRequestProto.getDefaultInstance(); + try { + RollEditsResponseProto resp = rpcProxy.rollEdits(null, req); + return resp.getNewSegmentTxId(); + } catch (ServiceException se) { + throw ProtobufHelper.getRemoteException(se); + } + } @Override public boolean restoreFailedStorage(String arg) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 23ebffa6b1..5a77e25b23 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -4415,6 +4415,7 @@ CheckpointSignature rollEditLog() throws IOException { writeLock(); try { checkOperation(OperationCategory.JOURNAL); + checkSuperuserPrivilege(); if (isInSafeMode()) { throw new SafeModeException("Log not rolled", safeMode); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 2f4037bea9..3aa0a75ff2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -709,6 +709,13 @@ public void saveNamespace() throws IOException { namesystem.checkOperation(OperationCategory.UNCHECKED); namesystem.saveNamespace(); } + + @Override // ClientProtocol + public long rollEdits() throws AccessControlException, IOException { + namesystem.checkOperation(OperationCategory.JOURNAL); + CheckpointSignature sig = namesystem.rollEditLog(); + return sig.getCurSegmentTxId(); + } @Override // ClientProtocol public void refreshNodes() throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index d5487e41b7..421f7bc3fd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -420,6 +420,14 @@ public int saveNamespace() throws IOException { return exitCode; } + public int rollEdits() throws IOException { + DistributedFileSystem dfs = getDFS(); + long txid = dfs.rollEdits(); + System.out.println("Successfully rolled edit logs."); + System.out.println("New segment starts at txid " + txid); + return 0; + } + /** * Command to enable/disable/check restoring of failed storage replicas in the namenode. 
* Usage: java DFSAdmin -restoreFailedStorage true|false|check @@ -516,6 +524,7 @@ private void printHelp(String cmd) { "The full syntax is: \n\n" + "hadoop dfsadmin [-report] [-safemode ]\n" + "\t[-saveNamespace]\n" + + "\t[-rollEdits]\n" + "\t[-restoreFailedStorage true|false|check]\n" + "\t[-refreshNodes]\n" + "\t[" + SetQuotaCommand.USAGE + "]\n" + @@ -548,6 +557,10 @@ private void printHelp(String cmd) { "Save current namespace into storage directories and reset edits log.\n" + "\t\tRequires superuser permissions and safe mode.\n"; + String rollEdits = "-rollEdits:\t" + + "Rolls the edit log.\n" + + "\t\tRequires superuser permissions.\n"; + String restoreFailedStorage = "-restoreFailedStorage:\t" + "Set/Unset/Check flag to attempt restore of failed storage replicas if they become available.\n" + "\t\tRequires superuser permissions.\n"; @@ -625,6 +638,8 @@ private void printHelp(String cmd) { System.out.println(safemode); } else if ("saveNamespace".equals(cmd)) { System.out.println(saveNamespace); + } else if ("rollEdits".equals(cmd)) { + System.out.println(rollEdits); } else if ("restoreFailedStorage".equals(cmd)) { System.out.println(restoreFailedStorage); } else if ("refreshNodes".equals(cmd)) { @@ -664,6 +679,7 @@ private void printHelp(String cmd) { System.out.println(report); System.out.println(safemode); System.out.println(saveNamespace); + System.out.println(rollEdits); System.out.println(restoreFailedStorage); System.out.println(refreshNodes); System.out.println(finalizeUpgrade); @@ -859,6 +875,9 @@ private static void printUsage(String cmd) { } else if ("-saveNamespace".equals(cmd)) { System.err.println("Usage: java DFSAdmin" + " [-saveNamespace]"); + } else if ("-rollEdits".equals(cmd)) { + System.err.println("Usage: java DFSAdmin" + + " [-rollEdits]"); } else if ("-restoreFailedStorage".equals(cmd)) { System.err.println("Usage: java DFSAdmin" + " [-restoreFailedStorage true|false|check ]"); @@ -913,6 +932,7 @@ private static void printUsage(String cmd) { System.err.println(" [-report]"); System.err.println(" [-safemode enter | leave | get | wait]"); System.err.println(" [-saveNamespace]"); + System.err.println(" [-rollEdits]"); System.err.println(" [-restoreFailedStorage true|false|check]"); System.err.println(" [-refreshNodes]"); System.err.println(" [-finalizeUpgrade]"); @@ -970,6 +990,11 @@ public int run(String[] argv) throws Exception { printUsage(cmd); return exitCode; } + } else if ("-rollEdits".equals(cmd)) { + if (argv.length != 1) { + printUsage(cmd); + return exitCode; + } } else if ("-restoreFailedStorage".equals(cmd)) { if (argv.length != 2) { printUsage(cmd); @@ -1048,6 +1073,8 @@ public int run(String[] argv) throws Exception { setSafeMode(argv, i); } else if ("-saveNamespace".equals(cmd)) { exitCode = saveNamespace(); + } else if ("-rollEdits".equals(cmd)) { + exitCode = rollEdits(); } else if ("-restoreFailedStorage".equals(cmd)) { exitCode = restoreFaileStorage(argv[i]); } else if ("-refreshNodes".equals(cmd)) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto index 439b0cdb58..8a4fd96d46 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -276,6 +276,13 @@ message SaveNamespaceRequestProto { // no parameters message SaveNamespaceResponseProto { // void response } +message RollEditsRequestProto { // no parameters +} + 
+message RollEditsResponseProto { // response + required uint64 newSegmentTxId = 1; +} + message RestoreFailedStorageRequestProto { required string arg = 1; } @@ -472,6 +479,8 @@ service ClientNamenodeProtocol { returns(SetSafeModeResponseProto); rpc saveNamespace(SaveNamespaceRequestProto) returns(SaveNamespaceResponseProto); + rpc rollEdits(RollEditsRequestProto) + returns(RollEditsResponseProto); rpc restoreFailedStorage(RestoreFailedStorageRequestProto) returns(RestoreFailedStorageResponseProto); rpc refreshNodes(RefreshNodesRequestProto) returns(RefreshNodesResponseProto); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml index f78ede61a9..fd4afb0963 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml @@ -15885,6 +15885,23 @@ + + + rollEdits: test rollEdits admin command + + -fs NAMENODE -rollEdits + + + + + + + RegexpComparator + New segment starts at txid \d+ + + + + refreshNodes: to refresh the nodes From 6669feae07e631988b152a24e85c9cb9770d36cb Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Wed, 5 Sep 2012 04:40:33 +0000 Subject: [PATCH 62/62] HADOOP-8764. CMake: HADOOP-8737 broke ARM build. Contributed by Trevor Robinson git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1380984 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++ hadoop-common-project/hadoop-common/src/JNIFlags.cmake | 2 ++ 2 files changed, 4 insertions(+) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index bd1339e6e0..4c24291d75 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -463,6 +463,8 @@ Branch-2 ( Unreleased changes ) HADOOP-8722. Update BUILDING.txt with latest snappy info. (Colin Patrick McCabe via eli) + HADOOP-8764. CMake: HADOOP-8737 broke ARM build. (Trevor Robinson via eli) + BREAKDOWN OF HDFS-3042 SUBTASKS HADOOP-8220. ZKFailoverController doesn't handle failure to become active diff --git a/hadoop-common-project/hadoop-common/src/JNIFlags.cmake b/hadoop-common-project/hadoop-common/src/JNIFlags.cmake index 617fccd2b7..aba4c18856 100644 --- a/hadoop-common-project/hadoop-common/src/JNIFlags.cmake +++ b/hadoop-common-project/hadoop-common/src/JNIFlags.cmake @@ -76,6 +76,8 @@ IF("${CMAKE_SYSTEM}" MATCHES "Linux") SET(_java_libarch "i386") ELSEIF (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64") SET(_java_libarch "amd64") + ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "^arm") + SET(_java_libarch "arm") ELSE() SET(_java_libarch ${CMAKE_SYSTEM_PROCESSOR}) ENDIF()
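The YARN-42 change earlier in this series amounts to a defensive shutdown pattern for a scheduler that may never have been created (for example, when init() fails before the executor is constructed). A minimal standalone sketch of that pattern follows; the class and field names here are illustrative and not part of the NodeManager code.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Hedged sketch of the shutdown pattern applied in YARN-42: guard against a
// null scheduler, then shut it down gracefully with a bounded wait before
// forcing termination.
public class GracefulStopSketch {
  private ScheduledExecutorService sched; // may still be null if init failed

  public void init() {
    sched = Executors.newScheduledThreadPool(4);
  }

  public void stop() {
    if (sched != null) {              // the null check added by YARN-42
      sched.shutdown();               // stop accepting new tasks
      boolean isShutdown = false;
      try {
        // give in-flight tasks up to 10 seconds to finish
        isShutdown = sched.awaitTermination(10, TimeUnit.SECONDS);
      } catch (InterruptedException e) {
        sched.shutdownNow();          // interrupted: force immediate shutdown
        isShutdown = true;
      }
      if (!isShutdown) {
        sched.shutdownNow();          // timed out: force immediate shutdown
      }
    }
  }
}

Guarding the shutdown this way keeps stop() safe to call from error-handling paths during startup, which is exactly the NPE scenario the patch addresses.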