diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
index 949ea42331..29ef6ca6c7 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
@@ -164,7 +164,7 @@ public void testWorkingDirectory() throws IOException {
   public void testSyncable() throws IOException {
     FileSystem fs = fileSys.getRawFileSystem();
     Path file = new Path(TEST_ROOT_DIR, "syncable");
-    FSDataOutputStream out = fs.create(file);;
+    FSDataOutputStream out = fs.create(file);
     final int bytesWritten = 1;
     byte[] expectedBuf = new byte[] {'0', '1', '2', '3'};
     try {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java
index 48924203f3..fdd622a7f6 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java
@@ -516,7 +516,7 @@ private static String parameters2String(MyOptions options) {
   }
 
   private static class MyOptions {
-    String rootDir = GenericTestUtils.getTestDir().getAbsolutePath();;
+    String rootDir = GenericTestUtils.getTestDir().getAbsolutePath();
     String compress = "gz";
     String format = "tfile";
     int dictSize = 1000;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
index 6742425e76..cca40f97c5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
@@ -289,7 +289,7 @@ public int run(String[] args) throws Exception {
     long cpuNanosClient = getTotalCpuTime(ctx.getTestThreads());
     long cpuNanosServer = -1;
     if (server != null) {
-      cpuNanosServer = getTotalCpuTime(server.getHandlers());;
+      cpuNanosServer = getTotalCpuTime(server.getHandlers());
     }
     System.out.println("====== Results ======");
     System.out.println("Options:\n" + opts);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestWhitelistBasedResolver.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestWhitelistBasedResolver.java
index 03fc4cbb54..81abc42a02 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestWhitelistBasedResolver.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestWhitelistBasedResolver.java
@@ -111,16 +111,16 @@ public void testFixedAndLocalWhiteList() throws IOException {
     assertEquals (wqr.getDefaultProperties(),
         wqr.getServerProperties(InetAddress.getByName("10.119.103.112")));
 
-    assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.119.103.113"));
+    assertEquals(SASL_PRIVACY_PROPS, wqr.getServerProperties("10.119.103.113"));
 
-    assertEquals (wqr.getDefaultProperties(), wqr.getServerProperties("10.221.103.121"));
+    assertEquals(wqr.getDefaultProperties(), wqr.getServerProperties("10.221.103.121"));
 
-    assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.221.104.0"));
-    assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.222.103.121"));
-    assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.223.104.0"));
-    assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.113.221.221"));
-    assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.113.221.222"));
-    assertEquals (wqr.getDefaultProperties(), wqr.getServerProperties("127.0.0.1"));;
+    assertEquals(SASL_PRIVACY_PROPS, wqr.getServerProperties("10.221.104.0"));
+    assertEquals(SASL_PRIVACY_PROPS, wqr.getServerProperties("10.222.103.121"));
+    assertEquals(SASL_PRIVACY_PROPS, wqr.getServerProperties("10.223.104.0"));
+    assertEquals(SASL_PRIVACY_PROPS, wqr.getServerProperties("10.113.221.221"));
+    assertEquals(SASL_PRIVACY_PROPS, wqr.getServerProperties("10.113.221.222"));
+    assertEquals(wqr.getDefaultProperties(), wqr.getServerProperties("127.0.0.1"));
 
     TestFileBasedIPList.removeFile("fixedwhitelist.txt");
     TestFileBasedIPList.removeFile("variablewhitelist.txt");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index 548525ba01..58c5ad39b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -745,7 +745,7 @@ private void constructHttpServerURI(GetEditLogManifestResponseProto ret) {
       URI uri = URI.create(ret.getFromURL());
       httpServerURL = getHttpServerURI(uri.getScheme(), uri.getPort());
     } else {
-      httpServerURL = getHttpServerURI("http", ret.getHttpPort());;
+      httpServerURL = getHttpServerURI("http", ret.getHttpPort());
     }
   }
 
@@ -754,7 +754,7 @@ private void constructHttpServerURI(GetJournalStateResponseProto ret) {
       URI uri = URI.create(ret.getFromURL());
       httpServerURL = getHttpServerURI(uri.getScheme(), uri.getPort());
     } else {
-      httpServerURL = getHttpServerURI("http", ret.getHttpPort());;
+      httpServerURL = getHttpServerURI("http", ret.getHttpPort());
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index a0c8e9840f..e95200b35a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -803,7 +803,7 @@ enum Command {
       geteditsizeOpt = new Option("geteditsize",
           "return the number of uncheckpointed transactions on the NameNode");
       checkpointOpt = OptionBuilder.withArgName("force")
-          .hasOptionalArg().withDescription("checkpoint on startup").create("checkpoint");;
+          .hasOptionalArg().withDescription("checkpoint on startup").create("checkpoint");
       formatOpt = new Option("format",
           "format the local storage during startup");
       helpOpt = new Option("h", "help", false, "get help information");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
index 088a47e893..1a763b5bae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
@@ -99,7 +99,7 @@ public void testRetryAddBlockWhileInChooseTarget() throws Exception {
           HdfsConstants.GRANDFATHER_INODE_ID, "clientName",
           null, onRetryBlock);
     } finally {
-      ns.readUnlock();;
+      ns.readUnlock();
     }
     DatanodeStorageInfo targets[] = FSDirWriteFileOp.chooseTargetForNewBlock(
         ns.getBlockManager(), src, null, null, null, r);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
index cff4e1f32f..33fefca5fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
@@ -91,7 +91,7 @@ public void testSaveNamespace() throws IOException {
       log.scanLog(Long.MAX_VALUE, true);
       long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
       assertEquals("In-progress log " + log + " should have 5 transactions",
-          5, numTransactions);;
+          5, numTransactions);
     }
 
     // Saving image in safe mode should succeed
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index 064d5ae839..1265863e11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -2613,7 +2613,7 @@ private static List<File> getCheckpointCurrentDirs(SecondaryNameNode secondary)
   }
 
   private static CheckpointStorage spyOnSecondaryImage(SecondaryNameNode secondary1) {
-    CheckpointStorage spy = Mockito.spy((CheckpointStorage)secondary1.getFSImage());;
+    CheckpointStorage spy = Mockito.spy((CheckpointStorage)secondary1.getFSImage());
     secondary1.setFSImage(spy);
     return spy;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java
index f5a112c7ac..b8cc32e43e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java
@@ -150,7 +150,7 @@ public void testWhenSomeNodesAreNotGood() throws Exception {
     d.stopDecommission();
 
     BlockLocation[] locations = getBlockLocations(p);
-    Assert.assertEquals(replication, locations[0].getNames().length);;
+    Assert.assertEquals(replication, locations[0].getNames().length);
     //also make sure that the datanode[0] is not in the list of hosts
     for (int i = 0; i < replication; i++) {
       final String loc = locations[0].getNames()[i];
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
index 2ef48a31f9..bd54ba2e98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
@@ -317,7 +317,7 @@ public Void run() throws Exception {
     longUgi.doAs(new PrivilegedExceptionAction<Void>() {
       @Override
       public Void run() throws Exception {
-        token.cancel(conf);;
+        token.cancel(conf);
         return null;
       }
     });
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
index fa4396d77f..06d9fbbe7a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
@@ -150,8 +150,8 @@ public void readFields(DataInput in) throws IOException {
    * @param <V>
    */
   class TrackedRecordReader<K, V>
-      implements RecordReader<K,V> {
-    private RecordReader<K,V> rawIn;
+      implements RecordReader<K, V> {
+    private RecordReader<K, V> rawIn;
     private Counters.Counter fileInputByteCounter;
     private Counters.Counter inputRecordCounter;
     private TaskReporter reporter;
@@ -240,7 +240,7 @@ private long getInputBytes(List<Statistics> stats) {
    * This class skips the records based on the failed ranges from previous
    * attempts.
    */
-  class SkippingRecordReader<K, V> extends TrackedRecordReader<K,V> {
+  class SkippingRecordReader<K, V> extends TrackedRecordReader<K, V> {
     private SkipRangeIterator skipIt;
     private SequenceFile.Writer skipWriter;
     private boolean toWriteSkipRecs;
@@ -930,7 +930,7 @@ public static class MapOutputBuffer
     // spill accounting
     private int maxRec;
     private int softLimit;
-    boolean spillInProgress;;
+    boolean spillInProgress;
     int bufferRemaining;
     volatile Throwable sortSpillException = null;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/LongValueMax.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/LongValueMax.java
index 90b4ae0f54..d574bf694f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/LongValueMax.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/LongValueMax.java
@@ -96,7 +96,7 @@ public void reset() {
    * expected to be used by the a combiner.
    */
   public ArrayList<String> getCombinerOutput() {
-    ArrayList<String> retv = new ArrayList<String>(1);;
+    ArrayList<String> retv = new ArrayList<String>(1);
     retv.add("" + maxVal);
     return retv;
   }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksBlock.java
index a00146e680..aa3acfbc54 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksBlock.java
@@ -123,7 +123,7 @@ public class HsTasksBlock extends HtmlBlock {
       long sortFinishTime = -1;
       long attemptFinishTime = -1;
       long elapsedShuffleTime = -1;
-      long elapsedSortTime = -1;;
+      long elapsedSortTime = -1;
       long elapsedReduceTime = -1;
       long attemptElapsed = -1;
       TaskAttempt successful = info.getSuccessful();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/BigMapOutput.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/BigMapOutput.java
index 35992f5de0..3aedfbf1ec 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/BigMapOutput.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/BigMapOutput.java
@@ -70,7 +70,7 @@ private static void createBigMapInputFile(Configuration conf, FileSystem fs,
                                BytesWritable.class, BytesWritable.class,
                                CompressionType.NONE);
     long numBytesToWrite = fileSizeInMB * 1024 * 1024;
-    int minKeySize = conf.getInt(MIN_KEY, 10);;
+    int minKeySize = conf.getInt(MIN_KEY, 10);
     int keySizeRange = conf.getInt(MAX_KEY, 1000) - minKeySize;
     int minValueSize = conf.getInt(MIN_VALUE, 0);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/NotificationTestCase.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/NotificationTestCase.java
index 5df1af5cce..3372c8f28b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/NotificationTestCase.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/NotificationTestCase.java
@@ -179,8 +179,7 @@ public void testMR() throws Exception {
 
     // Hack for local FS that does not have the concept of a 'mounting point'
     if (isLocalFS()) {
-      String localPathRoot = System.getProperty("test.build.data","/tmp")
-        .toString().replace(' ', '+');;
+      String localPathRoot = System.getProperty("test.build.data", "/tmp").replace(' ', '+');
       inDir = new Path(localPathRoot, inDir);
       outDir = new Path(localPathRoot, outDir);
     }
@@ -217,8 +216,7 @@ private String launchWordCount(JobConf conf,
 
     // Hack for local FS that does not have the concept of a 'mounting point'
     if (isLocalFS()) {
-      String localPathRoot = System.getProperty("test.build.data","/tmp")
-        .toString().replace(' ', '+');;
System.getProperty("test.build.data", "/tmp").replace(' ', '+'); inDir = new Path(localPathRoot, inDir); outDir = new Path(localPathRoot, outDir); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestValueIterReset.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestValueIterReset.java index 4bcacd8ec4..343e81f163 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestValueIterReset.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestValueIterReset.java @@ -437,7 +437,7 @@ private static int test3(IntWritable key, int count = 0; while (values.hasNext()) { - i = values.next();; + i = values.next(); LOG.info(key + ":" + i); if (count == 5) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/LongLong.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/LongLong.java index 8d7e7a0a97..8299ba161a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/LongLong.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/LongLong.java @@ -84,7 +84,7 @@ static LongLong multiplication(final LongLong r, final long a, final long b) { final long v = x1*y1; final long tmp = (t - u)>>>1; - result.d0 = ((t + u)>>>1) - v + ((tmp << MID) & FULL_MASK);; + result.d0 = ((t + u)>>>1) - v + ((tmp << MID) & FULL_MASK); result.d1 = v + (tmp >> MID); return result; */ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java index 621540677c..22c7cc34bc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java @@ -1928,7 +1928,7 @@ public void testGetQueueInfoWithNonExistedQueue() throws Exception { QueueCLI cli = createAndGetQueueCLI(); when(client.getQueueInfo(any(String.class))).thenReturn(null); int result = cli.run(new String[] { "-status", queueName }); - assertEquals(-1, result);; + assertEquals(-1, result); ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter pw = new PrintWriter(baos); pw.println("Cannot get queue from RM by queueName = " + queueName diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationAllocationStatePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationAllocationStatePBImpl.java index 88e39ec994..48f62c1bae 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationAllocationStatePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationAllocationStatePBImpl.java @@ -62,7 +62,7 @@ public class ReservationAllocationStatePBImpl 
@@ -62,7 +62,7 @@ public class ReservationAllocationStatePBImpl extends
     ReservationAllocationState {
 
   private ReservationAllocationStateProto proto =
-      ReservationAllocationStateProto.getDefaultInstance();;
+      ReservationAllocationStateProto.getDefaultInstance();
   private ReservationAllocationStateProto.Builder builder = null;
   private boolean viaProto = false;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ConfiguredYarnAuthorizer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ConfiguredYarnAuthorizer.java
index 615ecb0106..227f1c960a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ConfiguredYarnAuthorizer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ConfiguredYarnAuthorizer.java
@@ -43,7 +43,7 @@ public class ConfiguredYarnAuthorizer extends YarnAuthorizationProvider {
   private final ConcurrentMap<PrivilegedEntity, Map<AccessType, AccessControlList>>
       allAcls = new ConcurrentHashMap<>();
   private volatile AccessControlList adminAcl = null;
-  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();;
+  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
   private final ReentrantReadWriteLock.ReadLock readLock = lock.readLock();
   private final ReentrantReadWriteLock.WriteLock writeLock = lock.writeLock();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
index c13efadff1..6132e579ef 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
@@ -449,7 +449,7 @@ private enum State {
       COMPLETE
     };
 
-    private State state;;
+    private State state;
 
     private final String cwd;
     private final String jobName;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 9439c538af..1c4d60962e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -496,7 +496,7 @@ private void containerBasedPreemptOrKill(CSQueue root,
     Map<ApplicationAttemptId, Set<RMContainer>> toPreempt = new HashMap<>();
     Map<PreemptionCandidatesSelector, Map<ApplicationAttemptId,
-        Set<RMContainer>>> toPreemptPerSelector = new HashMap<>();;
+        Set<RMContainer>>> toPreemptPerSelector = new HashMap<>();
 
     for (PreemptionCandidatesSelector selector :
         candidatesSelectionPolicies) {
       long startTime = 0;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
index cc5e8af176..be0dbf2a1b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
@@ -1772,7 +1772,7 @@ private static ContainerLaunchContext mockContainerLaunchContext(
       RecordFactory recordFactory) {
     ContainerLaunchContext amContainer = recordFactory.newRecordInstance(
         ContainerLaunchContext.class);
-    amContainer.setApplicationACLs(new HashMap<ApplicationAccessType, String>());;
+    amContainer.setApplicationACLs(new HashMap<ApplicationAccessType, String>());
     return amContainer;
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
index bc540b0ba7..457e9d47dd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
@@ -109,7 +109,7 @@ public class TestRMAdminService {
 
-  private Configuration configuration;;
+  private Configuration configuration;
   private MockRM rm = null;
   private FileSystem fs;
   private Path workingPath;