HADOOP-14485. Redundant 'final' modifier in try-with-resources statement. Contributed by wenxin he.

Brahma Reddy Battula committed 2017-06-06 23:11:47 +08:00
parent 855e0477b1
commit 19ef3a81f8
7 changed files with 10 additions and 10 deletions
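
Background on the change: since Java 7, a resource declared in the header of a try-with-resources statement is implicitly final (JLS 14.20.3), so an explicit 'final' modifier there is redundant; the patch simply drops the keyword without changing behavior. A minimal standalone sketch of the rule (illustrative only, not part of the patch; the class and variable names are made up):

import java.io.IOException;
import java.io.StringWriter;

public class TryWithResourcesFinalDemo {
  public static void main(String[] args) throws IOException {
    // The resource variable 'w' is implicitly final, exactly as if it were
    // declared 'final StringWriter w = ...'; adding the keyword changes nothing.
    try (StringWriter w = new StringWriter()) {
      w.write("hello");
      // w = new StringWriter();  // would not compile: resource variables cannot be reassigned
      System.out.println(w);
    }
  }
}

Dropping the keyword therefore has no effect on semantics; each hunk below makes only that change.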

@@ -82,7 +82,7 @@ public void clusterDown() {
   @Test
   public void useHdfsFileSystem() throws IOException {
-    try (final FileSystem fs = cluster.getFileSystem()) {
+    try (FileSystem fs = cluster.getFileSystem()) {
       simpleReadAfterWrite(fs);
     }
   }
@@ -94,10 +94,10 @@ public void simpleReadAfterWrite(final FileSystem fs) throws IOException {
       throw new IOException("Mkdirs failed to create " +
           TEST_PATH);
     }
-    try (final FSDataOutputStream out = fs.create(path)) {
+    try (FSDataOutputStream out = fs.create(path)) {
       out.writeUTF(TEXT);
     }
-    try (final FSDataInputStream in = fs.open(path)) {
+    try (FSDataInputStream in = fs.open(path)) {
       final String result = in.readUTF();
       Assert.assertEquals("Didn't read back text we wrote.", TEXT, result);
     }
@@ -105,7 +105,7 @@ public void simpleReadAfterWrite(final FileSystem fs) throws IOException {
   @Test
   public void useWebHDFS() throws IOException, URISyntaxException {
-    try (final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(
+    try (FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(
         cluster.getConfiguration(0), WebHdfsConstants.WEBHDFS_SCHEME)) {
       simpleReadAfterWrite(fs);
     }

@@ -1425,7 +1425,7 @@ public void testSecureVersionMismatch() throws IOException {
   }
   private void checkVersionMismatch() throws IOException {
-    try (final ServerSocket listenSocket = new ServerSocket()) {
+    try (ServerSocket listenSocket = new ServerSocket()) {
       listenSocket.bind(null);
       InetSocketAddress addr =
           (InetSocketAddress) listenSocket.getLocalSocketAddress();

@@ -42,7 +42,7 @@ public class TestRollingAverages {
   public void testRollingAveragesEmptyRollover() throws Exception {
     final MetricsRecordBuilder rb = mockMetricsRecordBuilder();
     /* 5s interval and 2 windows */
-    try (final RollingAverages rollingAverages =
+    try (RollingAverages rollingAverages =
         new RollingAverages(5000, 2)) {
       /* Check it initially */
       rollingAverages.snapshot(rb, true);

@@ -254,7 +254,7 @@ public void testClusterSetDatanodeDifferentStorageType() throws IOException {
   @Test
   public void testClusterNoStorageTypeSetForDatanodes() throws IOException {
     final Configuration conf = new HdfsConfiguration();
-    try (final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(3).build()) {
       cluster.waitActive();
       ArrayList<DataNode> dataNodes = cluster.getDataNodes();

@@ -1847,7 +1847,7 @@ public void testMinBlockSizeAndSourceNodes() throws Exception {
     for(int i = 0; i < lengths.length; i++) {
       final long size = lengths[i];
       final Path p = new Path("/file" + i + "_size" + size);
-      try(final OutputStream out = dfs.create(p)) {
+      try(OutputStream out = dfs.create(p)) {
         for(int j = 0; j < size; j++) {
           out.write(j);
         }

@@ -153,7 +153,7 @@ private void verifyFaultInjectionDelayPipeline(
     cluster.waitActive();
     final FileSystem fs = cluster.getFileSystem();
-    try (final FSDataOutputStream out = fs
+    try (FSDataOutputStream out = fs
         .create(new Path(baseDir, "test.data"), (short) 2)) {
       out.write(0x31);
       out.hflush();

@@ -303,7 +303,7 @@ public void testSubmitApplicationInterrupted() throws IOException {
     int pollIntervalMs = 1000;
     conf.setLong(YarnConfiguration.YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS,
         pollIntervalMs);
-    try (final YarnClient client = new MockYarnClient()) {
+    try (YarnClient client = new MockYarnClient()) {
       client.init(conf);
       client.start();
       // Submit the application and then interrupt it while its waiting