diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java
index e2f8b842f3..ebebd25003 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java
@@ -18,6 +18,18 @@
 
 package org.apache.hadoop.fs.http.client;
 
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.Writer;
+import java.net.URL;
+import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
+import java.util.Collection;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.ContentSummary;
@@ -44,18 +56,6 @@
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.webapp.WebAppContext;
 
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.Writer;
-import java.net.URL;
-import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
-import java.util.Collection;
-
 @RunWith(value = Parameterized.class)
 public class TestHttpFSFileSystem extends HFSTestCase {
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestWebhdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestWebhdfsFileSystem.java
index 7c5b94c7c7..95c005976c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestWebhdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestWebhdfsFileSystem.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.fs.http.client;
 
+import java.net.URI;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
@@ -26,8 +28,6 @@
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
-import java.net.URI;
-
 @RunWith(value = Parameterized.class)
 public class TestWebhdfsFileSystem extends TestHttpFSFileSystem {
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java
index 9996e0bea0..947f928a0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java
@@ -18,15 +18,15 @@
 
 package org.apache.hadoop.fs.http.server;
 
-import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
-import org.junit.Test;
-import org.mockito.Mockito;
-
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
+import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
+import org.junit.Test;
+import org.mockito.Mockito;
+
 public class TestCheckUploadContentTypeFilter {
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
index ff525e643a..099eb4bf81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
@@ -18,7 +18,23 @@
 
 package org.apache.hadoop.fs.http.server;
 
-import junit.framework.Assert;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.io.Writer;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.text.MessageFormat;
+import java.util.Arrays;
+import java.util.List;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
@@ -38,20 +54,6 @@
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.webapp.WebAppContext;
 
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.io.Writer;
-import java.net.HttpURLConnection;
-import java.net.URL;
-import java.text.MessageFormat;
-import java.util.Arrays;
-import java.util.List;
-
 public class TestHttpFSServer extends HFSTestCase {
 
   @Test
@@ -103,9 +105,9 @@ public List<String> getGroups(String user) throws IOException {
   }
 
   private void createHttpFSServer() throws Exception {
     File homeDir = TestDirHelper.getTestDir();
-    Assert.assertTrue(new File(homeDir, "conf").mkdir());
-    Assert.assertTrue(new File(homeDir, "log").mkdir());
-    Assert.assertTrue(new File(homeDir, "temp").mkdir());
+    assertTrue(new File(homeDir, "conf").mkdir());
+    assertTrue(new File(homeDir, "log").mkdir());
+    assertTrue(new File(homeDir, "temp").mkdir());
     HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
     File secretFile = new File(new File(homeDir, "conf"), "secret");
@@ -157,23 +159,23 @@ public void instrumentation() throws Exception {
     URL url = new URL(TestJettyHelper.getJettyURL(),
                       MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation", "nobody"));
     HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_UNAUTHORIZED);
+    assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_UNAUTHORIZED);
 
     url = new URL(TestJettyHelper.getJettyURL(),
                   MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation",
                                        HadoopUsersConfTestHelper.getHadoopUsers()[0]));
     conn = (HttpURLConnection) url.openConnection();
-    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
     BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
     String line = reader.readLine();
     reader.close();
-    Assert.assertTrue(line.contains("\"counters\":{"));
+    assertTrue(line.contains("\"counters\":{"));
 
     url = new URL(TestJettyHelper.getJettyURL(),
                   MessageFormat.format("/webhdfs/v1/foo?user.name={0}&op=instrumentation",
                                        HadoopUsersConfTestHelper.getHadoopUsers()[0]));
     conn = (HttpURLConnection) url.openConnection();
-    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
+    assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
   }
 
   @Test
@@ -187,7 +189,7 @@ public void testHdfsAccess() throws Exception {
     URL url = new URL(TestJettyHelper.getJettyURL(),
                       MessageFormat.format("/webhdfs/v1/?user.name={0}&op=liststatus", user));
     HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
     BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
     reader.readLine();
     reader.close();
@@ -208,7 +210,7 @@ public void testGlobFilter() throws Exception {
     URL url = new URL(TestJettyHelper.getJettyURL(),
                       MessageFormat.format("/webhdfs/v1/tmp?user.name={0}&op=liststatus&filter=f*", user));
     HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
     BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
     reader.readLine();
     reader.close();
@@ -228,7 +230,7 @@ public void testPutNoOperation() throws Exception {
     conn.setDoInput(true);
     conn.setDoOutput(true);
     conn.setRequestMethod("PUT");
-    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
+    assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
   }
 
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestRunnableCallable.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestRunnableCallable.java
index 6079cf256f..1520af8776 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestRunnableCallable.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestRunnableCallable.java
@@ -19,12 +19,14 @@
 
 package org.apache.hadoop.lib.lang;
 
-import junit.framework.Assert;
-import org.apache.hadoop.test.HTestCase;
-import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.util.concurrent.Callable;
 
+import org.apache.hadoop.test.HTestCase;
+import org.junit.Test;
+
 public class TestRunnableCallable extends HTestCase {
 
   public static class R implements Runnable {
@@ -59,14 +61,14 @@ public void runnable() throws Exception {
     R r = new R();
     RunnableCallable rc = new RunnableCallable(r);
     rc.run();
-    Assert.assertTrue(r.RUN);
+    assertTrue(r.RUN);
 
     r = new R();
     rc = new RunnableCallable(r);
     rc.call();
-    Assert.assertTrue(r.RUN);
+    assertTrue(r.RUN);
 
-    Assert.assertEquals(rc.toString(), "R");
+    assertEquals(rc.toString(), "R");
   }
 
   @Test
@@ -74,14 +76,14 @@ public void callable() throws Exception {
     C c = new C();
     RunnableCallable rc = new RunnableCallable(c);
     rc.run();
-    Assert.assertTrue(c.RUN);
+    assertTrue(c.RUN);
 
     c = new C();
     rc = new RunnableCallable(c);
     rc.call();
-    Assert.assertTrue(c.RUN);
+    assertTrue(c.RUN);
 
-    Assert.assertEquals(rc.toString(), "C");
+    assertEquals(rc.toString(), "C");
   }
 
   @Test(expected = RuntimeException.class)
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java
index 0feca3044b..59d02e3d55 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java
@@ -19,7 +19,9 @@
 
 package org.apache.hadoop.lib.lang;
 
-import junit.framework.Assert;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
 import org.apache.hadoop.test.HTestCase;
 import org.junit.Test;
 
@@ -37,26 +39,26 @@ public String getTemplate() {
   @Test
   public void testXException() throws Exception {
     XException ex = new XException(TestERROR.TC);
-    Assert.assertEquals(ex.getError(), TestERROR.TC);
-    Assert.assertEquals(ex.getMessage(), "TC: {0}");
-    Assert.assertNull(ex.getCause());
+    assertEquals(ex.getError(), TestERROR.TC);
+    assertEquals(ex.getMessage(), "TC: {0}");
+    assertNull(ex.getCause());
 
     ex = new XException(TestERROR.TC, "msg");
-    Assert.assertEquals(ex.getError(), TestERROR.TC);
-    Assert.assertEquals(ex.getMessage(), "TC: msg");
-    Assert.assertNull(ex.getCause());
+    assertEquals(ex.getError(), TestERROR.TC);
+    assertEquals(ex.getMessage(), "TC: msg");
+    assertNull(ex.getCause());
 
     Exception cause = new Exception();
     ex = new XException(TestERROR.TC, cause);
-    Assert.assertEquals(ex.getError(), TestERROR.TC);
-    Assert.assertEquals(ex.getMessage(), "TC: " + cause.toString());
-    Assert.assertEquals(ex.getCause(), cause);
+    assertEquals(ex.getError(), TestERROR.TC);
+    assertEquals(ex.getMessage(), "TC: " + cause.toString());
+    assertEquals(ex.getCause(), cause);
 
     XException xcause = ex;
     ex = new XException(xcause);
-    Assert.assertEquals(ex.getError(), TestERROR.TC);
-    Assert.assertEquals(ex.getMessage(), xcause.getMessage());
-    Assert.assertEquals(ex.getCause(), xcause);
+    assertEquals(ex.getError(), TestERROR.TC);
+    assertEquals(ex.getMessage(), xcause.getMessage());
+    assertEquals(ex.getCause(), xcause);
   }
 
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestBaseService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestBaseService.java
index 037fd63a4c..402884bfbc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestBaseService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestBaseService.java
@@ -18,7 +18,10 @@
 
 package org.apache.hadoop.lib.server;
 
-import junit.framework.Assert;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.test.HTestCase;
 import org.junit.Test;
@@ -47,9 +50,9 @@ public Class getInterface() {
   @Test
   public void baseService() throws Exception {
     BaseService service = new MyService();
-    Assert.assertNull(service.getInterface());
-    Assert.assertEquals(service.getPrefix(), "myservice");
-    Assert.assertEquals(service.getServiceDependencies().length, 0);
+    assertNull(service.getInterface());
+    assertEquals(service.getPrefix(), "myservice");
+    assertEquals(service.getServiceDependencies().length, 0);
 
     Server server = Mockito.mock(Server.class);
     Configuration conf = new Configuration(false);
@@ -60,9 +63,9 @@ public void baseService() throws Exception {
     Mockito.when(server.getPrefixedName("myservice.")).thenReturn("server.myservice.");
 
     service.init(server);
-    Assert.assertEquals(service.getPrefixedName("foo"), "server.myservice.foo");
-    Assert.assertEquals(service.getServiceConfig().size(), 1);
-    Assert.assertEquals(service.getServiceConfig().get("foo"), "FOO");
-    Assert.assertTrue(MyService.INIT);
+    assertEquals(service.getPrefixedName("foo"), "server.myservice.foo");
+    assertEquals(service.getServiceConfig().size(), 1);
+    assertEquals(service.getServiceConfig().get("foo"), "FOO");
+    assertTrue(MyService.INIT);
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServer.java
index efd366b22d..2e28441d71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServer.java
@@ -18,16 +18,12 @@
 
 package org.apache.hadoop.lib.server;
 
-import junit.framework.Assert;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.lib.lang.XException;
-import org.apache.hadoop.test.HTestCase;
-import org.apache.hadoop.test.TestDir;
-import org.apache.hadoop.test.TestDirHelper;
-import org.apache.hadoop.test.TestException;
-import org.apache.hadoop.util.StringUtils;
-import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.FileOutputStream;
@@ -39,50 +35,60 @@
 import java.util.Arrays;
 import java.util.List;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.lib.lang.XException;
+import org.apache.hadoop.test.HTestCase;
+import org.apache.hadoop.test.TestDir;
+import org.apache.hadoop.test.TestDirHelper;
+import org.apache.hadoop.test.TestException;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.Test;
+
 public class TestServer extends HTestCase {
 
   @Test
   @TestDir
   public void constructorsGetters() throws Exception {
     Server server = new Server("server", "/a", "/b", "/c", "/d", new Configuration(false));
-    Assert.assertEquals(server.getHomeDir(), "/a");
-    Assert.assertEquals(server.getConfigDir(), "/b");
-    Assert.assertEquals(server.getLogDir(), "/c");
-    Assert.assertEquals(server.getTempDir(), "/d");
-    Assert.assertEquals(server.getName(), "server");
-    Assert.assertEquals(server.getPrefix(), "server");
-    Assert.assertEquals(server.getPrefixedName("name"), "server.name");
-    Assert.assertNotNull(server.getConfig());
+    assertEquals(server.getHomeDir(), "/a");
+    assertEquals(server.getConfigDir(), "/b");
+    assertEquals(server.getLogDir(), "/c");
+    assertEquals(server.getTempDir(), "/d");
+    assertEquals(server.getName(), "server");
+    assertEquals(server.getPrefix(), "server");
+    assertEquals(server.getPrefixedName("name"), "server.name");
+    assertNotNull(server.getConfig());
 
     server = new Server("server", "/a", "/b", "/c", "/d");
-    Assert.assertEquals(server.getHomeDir(), "/a");
-    Assert.assertEquals(server.getConfigDir(), "/b");
-    Assert.assertEquals(server.getLogDir(), "/c");
-    Assert.assertEquals(server.getTempDir(), "/d");
-    Assert.assertEquals(server.getName(), "server");
-    Assert.assertEquals(server.getPrefix(), "server");
-    Assert.assertEquals(server.getPrefixedName("name"), "server.name");
-    Assert.assertNull(server.getConfig());
+    assertEquals(server.getHomeDir(), "/a");
+    assertEquals(server.getConfigDir(), "/b");
+    assertEquals(server.getLogDir(), "/c");
+    assertEquals(server.getTempDir(), "/d");
+    assertEquals(server.getName(), "server");
+    assertEquals(server.getPrefix(), "server");
+    assertEquals(server.getPrefixedName("name"), "server.name");
+    assertNull(server.getConfig());
 
     server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
-    Assert.assertEquals(server.getHomeDir(), TestDirHelper.getTestDir().getAbsolutePath());
-    Assert.assertEquals(server.getConfigDir(), TestDirHelper.getTestDir() + "/conf");
-    Assert.assertEquals(server.getLogDir(), TestDirHelper.getTestDir() + "/log");
-    Assert.assertEquals(server.getTempDir(), TestDirHelper.getTestDir() + "/temp");
-    Assert.assertEquals(server.getName(), "server");
-    Assert.assertEquals(server.getPrefix(), "server");
-    Assert.assertEquals(server.getPrefixedName("name"), "server.name");
-    Assert.assertNotNull(server.getConfig());
+    assertEquals(server.getHomeDir(), TestDirHelper.getTestDir().getAbsolutePath());
+    assertEquals(server.getConfigDir(), TestDirHelper.getTestDir() + "/conf");
+    assertEquals(server.getLogDir(), TestDirHelper.getTestDir() + "/log");
+    assertEquals(server.getTempDir(), TestDirHelper.getTestDir() + "/temp");
+    assertEquals(server.getName(), "server");
+    assertEquals(server.getPrefix(), "server");
+    assertEquals(server.getPrefixedName("name"), "server.name");
+    assertNotNull(server.getConfig());
 
     server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath());
-    Assert.assertEquals(server.getHomeDir(), TestDirHelper.getTestDir().getAbsolutePath());
-    Assert.assertEquals(server.getConfigDir(), TestDirHelper.getTestDir() + "/conf");
-    Assert.assertEquals(server.getLogDir(), TestDirHelper.getTestDir() + "/log");
-    Assert.assertEquals(server.getTempDir(), TestDirHelper.getTestDir() + "/temp");
-    Assert.assertEquals(server.getName(), "server");
-    Assert.assertEquals(server.getPrefix(), "server");
-    Assert.assertEquals(server.getPrefixedName("name"), "server.name");
-    Assert.assertNull(server.getConfig());
+    assertEquals(server.getHomeDir(), TestDirHelper.getTestDir().getAbsolutePath());
+    assertEquals(server.getConfigDir(), TestDirHelper.getTestDir() + "/conf");
+    assertEquals(server.getLogDir(), TestDirHelper.getTestDir() + "/log");
+    assertEquals(server.getTempDir(), TestDirHelper.getTestDir() + "/temp");
+    assertEquals(server.getName(), "server");
+    assertEquals(server.getPrefix(), "server");
+    assertEquals(server.getPrefixedName("name"), "server.name");
+    assertNull(server.getConfig());
   }
 
   @Test
@@ -113,9 +119,9 @@ public void initHomeDirNotDir() throws Exception {
   @TestDir
   public void initNoConfigDir() throws Exception {
     File homeDir = new File(TestDirHelper.getTestDir(), "home");
-    Assert.assertTrue(homeDir.mkdir());
-    Assert.assertTrue(new File(homeDir, "log").mkdir());
-    Assert.assertTrue(new File(homeDir, "temp").mkdir());
+    assertTrue(homeDir.mkdir());
+    assertTrue(new File(homeDir, "log").mkdir());
+    assertTrue(new File(homeDir, "temp").mkdir());
     Configuration conf = new Configuration(false);
     conf.set("server.services", TestService.class.getName());
     Server server = new Server("server", homeDir.getAbsolutePath(), conf);
@@ -127,9 +133,9 @@ public void initNoConfigDir() throws Exception {
   @TestDir
   public void initConfigDirNotDir() throws Exception {
     File homeDir = new File(TestDirHelper.getTestDir(), "home");
-    Assert.assertTrue(homeDir.mkdir());
-    Assert.assertTrue(new File(homeDir, "log").mkdir());
-    Assert.assertTrue(new File(homeDir, "temp").mkdir());
+    assertTrue(homeDir.mkdir());
+    assertTrue(new File(homeDir, "log").mkdir());
+    assertTrue(new File(homeDir, "temp").mkdir());
     File configDir = new File(homeDir, "conf");
     new FileOutputStream(configDir).close();
     Configuration conf = new Configuration(false);
@@ -143,9 +149,9 @@ public void initConfigDirNotDir() throws Exception {
   @TestDir
   public void initNoLogDir() throws Exception {
     File homeDir = new File(TestDirHelper.getTestDir(), "home");
-    Assert.assertTrue(homeDir.mkdir());
-    Assert.assertTrue(new File(homeDir, "conf").mkdir());
-    Assert.assertTrue(new File(homeDir, "temp").mkdir());
+    assertTrue(homeDir.mkdir());
+    assertTrue(new File(homeDir, "conf").mkdir());
+    assertTrue(new File(homeDir, "temp").mkdir());
     Configuration conf = new Configuration(false);
     conf.set("server.services", TestService.class.getName());
     Server server = new Server("server", homeDir.getAbsolutePath(), conf);
@@ -157,9 +163,9 @@ public void initNoLogDir() throws Exception {
   @TestDir
   public void initLogDirNotDir() throws Exception {
     File homeDir = new File(TestDirHelper.getTestDir(), "home");
-    Assert.assertTrue(homeDir.mkdir());
-    Assert.assertTrue(new File(homeDir, "conf").mkdir());
-    Assert.assertTrue(new File(homeDir, "temp").mkdir());
+    assertTrue(homeDir.mkdir());
+    assertTrue(new File(homeDir, "conf").mkdir());
+    assertTrue(new File(homeDir, "temp").mkdir());
     File logDir = new File(homeDir, "log");
     new FileOutputStream(logDir).close();
     Configuration conf = new Configuration(false);
@@ -173,9 +179,9 @@ public void initLogDirNotDir() throws Exception {
   @TestDir
   public void initNoTempDir() throws Exception {
     File homeDir = new File(TestDirHelper.getTestDir(), "home");
-    Assert.assertTrue(homeDir.mkdir());
-    Assert.assertTrue(new File(homeDir, "conf").mkdir());
-    Assert.assertTrue(new File(homeDir, "log").mkdir());
+    assertTrue(homeDir.mkdir());
+    assertTrue(new File(homeDir, "conf").mkdir());
+    assertTrue(new File(homeDir, "log").mkdir());
     Configuration conf = new Configuration(false);
     conf.set("server.services", TestService.class.getName());
     Server server = new Server("server", homeDir.getAbsolutePath(), conf);
@@ -187,9 +193,9 @@ public void initNoTempDir() throws Exception {
   @TestDir
   public void initTempDirNotDir() throws Exception {
     File homeDir = new File(TestDirHelper.getTestDir(), "home");
-    Assert.assertTrue(homeDir.mkdir());
-    Assert.assertTrue(new File(homeDir, "conf").mkdir());
-    Assert.assertTrue(new File(homeDir, "log").mkdir());
+    assertTrue(homeDir.mkdir());
+    assertTrue(new File(homeDir, "conf").mkdir());
+    assertTrue(new File(homeDir, "log").mkdir());
     File tempDir = new File(homeDir, "temp");
     new FileOutputStream(tempDir).close();
     Configuration conf = new Configuration(false);
@@ -204,7 +210,7 @@ public void initTempDirNotDir() throws Exception {
   public void siteFileNotAFile() throws Exception {
     String homeDir = TestDirHelper.getTestDir().getAbsolutePath();
     File siteFile = new File(homeDir, "server-site.xml");
-    Assert.assertTrue(siteFile.mkdir());
+    assertTrue(siteFile.mkdir());
     Server server = new Server("server", homeDir, homeDir, homeDir, homeDir);
     server.init();
   }
@@ -234,12 +240,12 @@ public LifeCycleService() {
 
     @Override
     protected void init() throws ServiceException {
-      Assert.assertEquals(getServer().getStatus(), Server.Status.BOOTING);
+      assertEquals(getServer().getStatus(), Server.Status.BOOTING);
     }
 
     @Override
     public void destroy() {
-      Assert.assertEquals(getServer().getStatus(), Server.Status.SHUTTING_DOWN);
+      assertEquals(getServer().getStatus(), Server.Status.SHUTTING_DOWN);
       super.destroy();
     }
 
@@ -255,12 +261,12 @@ public void lifeCycle() throws Exception {
     Configuration conf = new Configuration(false);
     conf.set("server.services", LifeCycleService.class.getName());
     Server server = createServer(conf);
-    Assert.assertEquals(server.getStatus(), Server.Status.UNDEF);
+    assertEquals(server.getStatus(), Server.Status.UNDEF);
     server.init();
-    Assert.assertNotNull(server.get(LifeCycleService.class));
-    Assert.assertEquals(server.getStatus(), Server.Status.NORMAL);
+    assertNotNull(server.get(LifeCycleService.class));
+    assertEquals(server.getStatus(), Server.Status.NORMAL);
     server.destroy();
-    Assert.assertEquals(server.getStatus(), Server.Status.SHUTDOWN);
+    assertEquals(server.getStatus(), Server.Status.SHUTDOWN);
   }
 
   @Test
@@ -270,7 +276,7 @@ public void startWithStatusNotNormal() throws Exception {
     conf.set("server.startup.status", "ADMIN");
     Server server = createServer(conf);
     server.init();
-    Assert.assertEquals(server.getStatus(), Server.Status.ADMIN);
+    assertEquals(server.getStatus(), Server.Status.ADMIN);
     server.destroy();
   }
 
@@ -334,7 +340,7 @@ public void changeStatus() throws Exception {
     Server server = createServer(conf);
     server.init();
     server.setStatus(Server.Status.ADMIN);
-    Assert.assertTrue(TestService.LIFECYCLE.contains("serverStatusChange"));
+    assertTrue(TestService.LIFECYCLE.contains("serverStatusChange"));
   }
 
   @Test
@@ -357,7 +363,7 @@ public void setSameStatus() throws Exception {
     server.init();
     TestService.LIFECYCLE.clear();
     server.setStatus(server.getStatus());
-    Assert.assertFalse(TestService.LIFECYCLE.contains("serverStatusChange"));
+    assertFalse(TestService.LIFECYCLE.contains("serverStatusChange"));
   }
 
   @Test
@@ -368,9 +374,9 @@ public void serviceLifeCycle() throws Exception {
     conf.set("server.services", TestService.class.getName());
     Server server = createServer(conf);
     server.init();
-    Assert.assertNotNull(server.get(TestService.class));
+    assertNotNull(server.get(TestService.class));
     server.destroy();
-    Assert.assertEquals(TestService.LIFECYCLE, Arrays.asList("init", "postInit", "serverStatusChange", "destroy"));
+    assertEquals(TestService.LIFECYCLE, Arrays.asList("init", "postInit", "serverStatusChange", "destroy"));
   }
 
   @Test
@@ -379,7 +385,7 @@ public void loadingDefaultConfig() throws Exception {
     String dir = TestDirHelper.getTestDir().getAbsolutePath();
     Server server = new Server("testserver", dir, dir, dir, dir);
     server.init();
-    Assert.assertEquals(server.getConfig().get("testserver.a"), "default");
+    assertEquals(server.getConfig().get("testserver.a"), "default");
   }
 
   @Test
@@ -392,7 +398,7 @@ public void loadingSiteConfig() throws Exception {
     w.close();
     Server server = new Server("testserver", dir, dir, dir, dir);
     server.init();
-    Assert.assertEquals(server.getConfig().get("testserver.a"), "site");
+    assertEquals(server.getConfig().get("testserver.a"), "site");
   }
 
   @Test
@@ -407,7 +413,7 @@ public void loadingSysPropConfig() throws Exception {
       w.close();
       Server server = new Server("testserver", dir, dir, dir, dir);
       server.init();
-      Assert.assertEquals(server.getConfig().get("testserver.a"), "sysprop");
+      assertEquals(server.getConfig().get("testserver.a"), "sysprop");
     } finally {
       System.getProperties().remove("testserver.a");
     }
@@ -633,7 +639,7 @@ public void services() throws Exception {
     conf = new Configuration(false);
     server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
-    Assert.assertEquals(ORDER.size(), 0);
+    assertEquals(ORDER.size(), 0);
 
     // 2 services init/destroy
     ORDER.clear();
@@ -643,17 +649,17 @@ public void services() throws Exception {
     conf.set("server.services", services);
     server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
-    Assert.assertEquals(server.get(MyService1.class).getInterface(), MyService1.class);
-    Assert.assertEquals(server.get(MyService3.class).getInterface(), MyService3.class);
-    Assert.assertEquals(ORDER.size(), 4);
-    Assert.assertEquals(ORDER.get(0), "s1.init");
-    Assert.assertEquals(ORDER.get(1), "s3.init");
-    Assert.assertEquals(ORDER.get(2), "s1.postInit");
-    Assert.assertEquals(ORDER.get(3), "s3.postInit");
+    assertEquals(server.get(MyService1.class).getInterface(), MyService1.class);
+    assertEquals(server.get(MyService3.class).getInterface(), MyService3.class);
+    assertEquals(ORDER.size(), 4);
+    assertEquals(ORDER.get(0), "s1.init");
+    assertEquals(ORDER.get(1), "s3.init");
+    assertEquals(ORDER.get(2), "s1.postInit");
+    assertEquals(ORDER.get(3), "s3.postInit");
     server.destroy();
-    Assert.assertEquals(ORDER.size(), 6);
-    Assert.assertEquals(ORDER.get(4), "s3.destroy");
-    Assert.assertEquals(ORDER.get(5), "s1.destroy");
+    assertEquals(ORDER.size(), 6);
+    assertEquals(ORDER.get(4), "s3.destroy");
+    assertEquals(ORDER.get(5), "s1.destroy");
 
     // 3 services, 2nd one fails on init
     ORDER.clear();
@@ -665,16 +671,16 @@ public void services() throws Exception {
     server = new Server("server", dir, dir, dir, dir, conf);
     try {
       server.init();
-      Assert.fail();
+      fail();
     } catch (ServerException ex) {
-      Assert.assertEquals(MyService2.class, ex.getError().getClass());
+      assertEquals(MyService2.class, ex.getError().getClass());
     } catch (Exception ex) {
-      Assert.fail();
+      fail();
     }
-    Assert.assertEquals(ORDER.size(), 3);
-    Assert.assertEquals(ORDER.get(0), "s1.init");
-    Assert.assertEquals(ORDER.get(1), "s2.init");
-    Assert.assertEquals(ORDER.get(2), "s1.destroy");
+    assertEquals(ORDER.size(), 3);
+    assertEquals(ORDER.get(0), "s1.init");
+    assertEquals(ORDER.get(1), "s2.init");
+    assertEquals(ORDER.get(2), "s1.destroy");
 
     // 2 services one fails on destroy
     ORDER.clear();
@@ -683,15 +689,15 @@ public void services() throws Exception {
     conf.set("server.services", services);
     server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
-    Assert.assertEquals(ORDER.size(), 4);
-    Assert.assertEquals(ORDER.get(0), "s1.init");
-    Assert.assertEquals(ORDER.get(1), "s5.init");
-    Assert.assertEquals(ORDER.get(2), "s1.postInit");
-    Assert.assertEquals(ORDER.get(3), "s5.postInit");
+    assertEquals(ORDER.size(), 4);
+    assertEquals(ORDER.get(0), "s1.init");
+    assertEquals(ORDER.get(1), "s5.init");
+    assertEquals(ORDER.get(2), "s1.postInit");
+    assertEquals(ORDER.get(3), "s5.postInit");
     server.destroy();
-    Assert.assertEquals(ORDER.size(), 6);
-    Assert.assertEquals(ORDER.get(4), "s5.destroy");
-    Assert.assertEquals(ORDER.get(5), "s1.destroy");
+    assertEquals(ORDER.size(), 6);
+    assertEquals(ORDER.get(4), "s5.destroy");
+    assertEquals(ORDER.get(5), "s1.destroy");
 
 
     // service override via ext
@@ -705,16 +711,16 @@ public void services() throws Exception {
 
     server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
-    Assert.assertEquals(server.get(MyService1.class).getClass(), MyService1a.class);
-    Assert.assertEquals(ORDER.size(), 4);
-    Assert.assertEquals(ORDER.get(0), "s1a.init");
-    Assert.assertEquals(ORDER.get(1), "s3.init");
-    Assert.assertEquals(ORDER.get(2), "s1a.postInit");
-    Assert.assertEquals(ORDER.get(3), "s3.postInit");
+    assertEquals(server.get(MyService1.class).getClass(), MyService1a.class);
+    assertEquals(ORDER.size(), 4);
+    assertEquals(ORDER.get(0), "s1a.init");
+    assertEquals(ORDER.get(1), "s3.init");
+    assertEquals(ORDER.get(2), "s1a.postInit");
+    assertEquals(ORDER.get(3), "s3.postInit");
     server.destroy();
-    Assert.assertEquals(ORDER.size(), 6);
-    Assert.assertEquals(ORDER.get(4), "s3.destroy");
-    Assert.assertEquals(ORDER.get(5), "s1a.destroy");
+    assertEquals(ORDER.size(), 6);
+    assertEquals(ORDER.get(4), "s3.destroy");
+    assertEquals(ORDER.get(5), "s1a.destroy");
 
     // service override via setService
     ORDER.clear();
@@ -725,16 +731,16 @@ public void services() throws Exception {
 
     server.init();
     server.setService(MyService1a.class);
-    Assert.assertEquals(ORDER.size(), 6);
-    Assert.assertEquals(ORDER.get(4), "s1.destroy");
-    Assert.assertEquals(ORDER.get(5), "s1a.init");
+    assertEquals(ORDER.size(), 6);
+    assertEquals(ORDER.get(4), "s1.destroy");
+    assertEquals(ORDER.get(5), "s1a.init");
 
-    Assert.assertEquals(server.get(MyService1.class).getClass(), MyService1a.class);
+    assertEquals(server.get(MyService1.class).getClass(), MyService1a.class);
 
     server.destroy();
-    Assert.assertEquals(ORDER.size(), 8);
-    Assert.assertEquals(ORDER.get(6), "s3.destroy");
-    Assert.assertEquals(ORDER.get(7), "s1a.destroy");
+    assertEquals(ORDER.size(), 8);
+    assertEquals(ORDER.get(6), "s3.destroy");
+    assertEquals(ORDER.get(7), "s1a.destroy");
 
     // service add via setService
     ORDER.clear();
@@ -745,16 +751,16 @@ public void services() throws Exception {
 
     server.init();
     server.setService(MyService5.class);
-    Assert.assertEquals(ORDER.size(), 5);
-    Assert.assertEquals(ORDER.get(4), "s5.init");
+    assertEquals(ORDER.size(), 5);
+    assertEquals(ORDER.get(4), "s5.init");
 
-    Assert.assertEquals(server.get(MyService5.class).getClass(), MyService5.class);
+    assertEquals(server.get(MyService5.class).getClass(), MyService5.class);
 
     server.destroy();
-    Assert.assertEquals(ORDER.size(), 8);
-    Assert.assertEquals(ORDER.get(5), "s5.destroy");
-    Assert.assertEquals(ORDER.get(6), "s3.destroy");
-    Assert.assertEquals(ORDER.get(7), "s1.destroy");
+    assertEquals(ORDER.size(), 8);
+    assertEquals(ORDER.get(5), "s5.destroy");
+    assertEquals(ORDER.get(6), "s3.destroy");
+    assertEquals(ORDER.get(7), "s1.destroy");
 
     // service add via setService exception
     ORDER.clear();
@@ -765,15 +771,15 @@ public void services() throws Exception {
     server.init();
     try {
       server.setService(MyService7.class);
-      Assert.fail();
+      fail();
     } catch (ServerException ex) {
-      Assert.assertEquals(ServerException.ERROR.S09, ex.getError());
+      assertEquals(ServerException.ERROR.S09, ex.getError());
     } catch (Exception ex) {
-      Assert.fail();
+      fail();
     }
-    Assert.assertEquals(ORDER.size(), 6);
-    Assert.assertEquals(ORDER.get(4), "s3.destroy");
-    Assert.assertEquals(ORDER.get(5), "s1.destroy");
+    assertEquals(ORDER.size(), 6);
+    assertEquals(ORDER.get(4), "s3.destroy");
+    assertEquals(ORDER.get(5), "s1.destroy");
 
     // service with dependency
     ORDER.clear();
@@ -782,8 +788,8 @@ public void services() throws Exception {
     conf.set("server.services", services);
     server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
-    Assert.assertEquals(server.get(MyService1.class).getInterface(), MyService1.class);
-    Assert.assertEquals(server.get(MyService6.class).getInterface(), MyService6.class);
+    assertEquals(server.get(MyService1.class).getInterface(), MyService1.class);
+    assertEquals(server.get(MyService6.class).getInterface(), MyService6.class);
     server.destroy();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServerConstructor.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServerConstructor.java
index 72913eebb5..6b7c6286d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServerConstructor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServerConstructor.java
@@ -18,15 +18,15 @@
 
 package org.apache.hadoop.lib.server;
 
+import java.util.Arrays;
+import java.util.Collection;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.test.HTestCase;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
-import java.util.Arrays;
-import java.util.Collection;
-
 @RunWith(value = Parameterized.class)
 public class TestServerConstructor extends HTestCase {
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java
index b8689c9d6e..192fdd1704 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java
@@ -18,7 +18,16 @@
 
 package org.apache.hadoop.lib.service.hadoop;
 
-import junit.framework.Assert;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Arrays;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
@@ -38,12 +47,6 @@
 import org.junit.Before;
 import org.junit.Test;
 
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.Arrays;
-
 public class TestFileSystemAccessService extends HFSTestCase {
 
   private void createHadoopConf(Configuration hadoopConf) throws Exception {
@@ -71,7 +74,7 @@ public void simpleSecurity() throws Exception {
     conf.set("server.services", services);
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
-    Assert.assertNotNull(server.get(FileSystemAccess.class));
+    assertNotNull(server.get(FileSystemAccess.class));
     server.destroy();
   }
 
@@ -148,7 +151,7 @@ public void serviceHadoopConf() throws Exception {
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
-    Assert.assertEquals(fsAccess.serviceHadoopConf.get("foo"), "FOO");
+    assertEquals(fsAccess.serviceHadoopConf.get("foo"), "FOO");
     server.destroy();
   }
 
@@ -174,7 +177,7 @@ public void serviceHadoopConfCustomDir() throws Exception {
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
-    Assert.assertEquals(fsAccess.serviceHadoopConf.get("foo"), "BAR");
+    assertEquals(fsAccess.serviceHadoopConf.get("foo"), "BAR");
     server.destroy();
   }
 
@@ -245,15 +248,15 @@ public void createFileSystem() throws Exception {
     server.init();
     FileSystemAccess hadoop = server.get(FileSystemAccess.class);
     FileSystem fs = hadoop.createFileSystem("u", hadoop.getFileSystemConfiguration());
-    Assert.assertNotNull(fs);
+    assertNotNull(fs);
     fs.mkdirs(new Path("/tmp/foo"));
     hadoop.releaseFileSystem(fs);
     try {
       fs.mkdirs(new Path("/tmp/foo"));
-      Assert.fail();
+      fail();
     } catch (IOException ex) {
     } catch (Exception ex) {
-      Assert.fail();
+      fail();
     }
     server.destroy();
   }
@@ -288,10 +291,10 @@ public Void execute(FileSystem fs) throws IOException {
     });
     try {
       fsa[0].mkdirs(new Path("/tmp/foo"));
-      Assert.fail();
+      fail();
     } catch (IOException ex) {
     } catch (Exception ex) {
-      Assert.fail();
+      fail();
     }
     server.destroy();
   }
@@ -351,19 +354,19 @@ public Void execute(FileSystem fs) throws IOException {
           throw new IOException();
         }
       });
-      Assert.fail();
+      fail();
     } catch (FileSystemAccessException ex) {
-      Assert.assertEquals(ex.getError(), FileSystemAccessException.ERROR.H03);
+      assertEquals(ex.getError(), FileSystemAccessException.ERROR.H03);
     } catch (Exception ex) {
-      Assert.fail();
+      fail();
    }
 
     try {
       fsa[0].mkdirs(new Path("/tmp/foo"));
-      Assert.fail();
+      fail();
     } catch (IOException ex) {
     } catch (Exception ex) {
-      Assert.fail();
+      fail();
     }
     server.destroy();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/instrumentation/TestInstrumentationService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/instrumentation/TestInstrumentationService.java
index 5bd036339b..c609fefc80 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/instrumentation/TestInstrumentationService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/instrumentation/TestInstrumentationService.java
@@ -18,7 +18,16 @@
 
 package org.apache.hadoop.lib.service.instrumentation;
 
-import junit.framework.Assert;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.StringWriter;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.lib.server.Server;
 import org.apache.hadoop.lib.service.Instrumentation;
@@ -32,11 +41,6 @@
 import org.json.simple.parser.JSONParser;
 import org.junit.Test;
 
-import java.io.StringWriter;
-import java.util.Arrays;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicInteger;
-
 public class TestInstrumentationService extends HTestCase {
 
   @Override
@@ -47,51 +51,51 @@ protected float getWaitForRatio() {
   @Test
   public void cron() {
     InstrumentationService.Cron cron = new InstrumentationService.Cron();
-    Assert.assertEquals(cron.start, 0);
-    Assert.assertEquals(cron.lapStart, 0);
-    Assert.assertEquals(cron.own, 0);
-    Assert.assertEquals(cron.total, 0);
+    assertEquals(cron.start, 0);
+    assertEquals(cron.lapStart, 0);
+    assertEquals(cron.own, 0);
+    assertEquals(cron.total, 0);
     long begin = Time.now();
-    Assert.assertEquals(cron.start(), cron);
-    Assert.assertEquals(cron.start(), cron);
-    Assert.assertEquals(cron.start, begin, 20);
-    Assert.assertEquals(cron.start, cron.lapStart);
+    assertEquals(cron.start(), cron);
+    assertEquals(cron.start(), cron);
+    assertEquals(cron.start, begin, 20);
+    assertEquals(cron.start, cron.lapStart);
     sleep(100);
-    Assert.assertEquals(cron.stop(), cron);
+    assertEquals(cron.stop(), cron);
     long end = Time.now();
     long delta = end - begin;
-    Assert.assertEquals(cron.own, delta, 20);
-    Assert.assertEquals(cron.total, 0);
-    Assert.assertEquals(cron.lapStart, 0);
+    assertEquals(cron.own, delta, 20);
+    assertEquals(cron.total, 0);
+    assertEquals(cron.lapStart, 0);
     sleep(100);
     long reStart = Time.now();
     cron.start();
-    Assert.assertEquals(cron.start, begin, 20);
-    Assert.assertEquals(cron.lapStart, reStart, 20);
+    assertEquals(cron.start, begin, 20);
+    assertEquals(cron.lapStart, reStart, 20);
     sleep(100);
     cron.stop();
     long reEnd = Time.now();
     delta += reEnd - reStart;
-    Assert.assertEquals(cron.own, delta, 20);
-    Assert.assertEquals(cron.total, 0);
-    Assert.assertEquals(cron.lapStart, 0);
+    assertEquals(cron.own, delta, 20);
+    assertEquals(cron.total, 0);
+    assertEquals(cron.lapStart, 0);
     cron.end();
-    Assert.assertEquals(cron.total, reEnd - begin, 20);
+    assertEquals(cron.total, reEnd - begin, 20);
 
     try {
       cron.start();
-      Assert.fail();
+      fail();
     } catch (IllegalStateException ex) {
     } catch (Exception ex) {
-      Assert.fail();
+      fail();
     }
 
     try {
      cron.stop();
-      Assert.fail();
+      fail();
     } catch (IllegalStateException ex) {
     } catch (Exception ex) {
-      Assert.fail();
+      fail();
     }
   }
@@ -135,10 +139,10 @@ public void timer() throws Exception {
 
     timer.addCron(cron);
     long[] values = timer.getValues();
-    Assert.assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
-    Assert.assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
-    Assert.assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
-    Assert.assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
+    assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
+    assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
+    assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
+    assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
 
 
     cron = new InstrumentationService.Cron();
@@ -168,10 +172,10 @@ public void timer() throws Exception {
 
     timer.addCron(cron);
     values = timer.getValues();
-    Assert.assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
-    Assert.assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
-    Assert.assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
-    Assert.assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
+    assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
+    assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
+    assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
+    assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
 
     avgTotal = totalDelta;
     avgOwn = ownDelta;
@@ -205,27 +209,27 @@ public void timer() throws Exception {
     cron.stop();
     timer.addCron(cron);
     values = timer.getValues();
-    Assert.assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
-    Assert.assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
-    Assert.assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
-    Assert.assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
+    assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
+    assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
+    assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
+    assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
 
     JSONObject json = (JSONObject) new JSONParser().parse(timer.toJSONString());
-    Assert.assertEquals(json.size(), 4);
-    Assert.assertEquals(json.get("lastTotal"), values[InstrumentationService.Timer.LAST_TOTAL]);
-    Assert.assertEquals(json.get("lastOwn"), values[InstrumentationService.Timer.LAST_OWN]);
-    Assert.assertEquals(json.get("avgTotal"), values[InstrumentationService.Timer.AVG_TOTAL]);
-    Assert.assertEquals(json.get("avgOwn"), values[InstrumentationService.Timer.AVG_OWN]);
+    assertEquals(json.size(), 4);
+    assertEquals(json.get("lastTotal"), values[InstrumentationService.Timer.LAST_TOTAL]);
+    assertEquals(json.get("lastOwn"), values[InstrumentationService.Timer.LAST_OWN]);
+    assertEquals(json.get("avgTotal"), values[InstrumentationService.Timer.AVG_TOTAL]);
+    assertEquals(json.get("avgOwn"), values[InstrumentationService.Timer.AVG_OWN]);
 
     StringWriter writer = new StringWriter();
     timer.writeJSONString(writer);
     writer.close();
     json = (JSONObject) new JSONParser().parse(writer.toString());
-    Assert.assertEquals(json.size(), 4);
-    Assert.assertEquals(json.get("lastTotal"), values[InstrumentationService.Timer.LAST_TOTAL]);
-    Assert.assertEquals(json.get("lastOwn"), values[InstrumentationService.Timer.LAST_OWN]);
-    Assert.assertEquals(json.get("avgTotal"), values[InstrumentationService.Timer.AVG_TOTAL]);
-    Assert.assertEquals(json.get("avgOwn"), values[InstrumentationService.Timer.AVG_OWN]);
+    assertEquals(json.size(), 4);
+    assertEquals(json.get("lastTotal"), values[InstrumentationService.Timer.LAST_TOTAL]);
+    assertEquals(json.get("lastOwn"), values[InstrumentationService.Timer.LAST_OWN]);
+    assertEquals(json.get("avgTotal"), values[InstrumentationService.Timer.AVG_TOTAL]);
+    assertEquals(json.get("avgOwn"), values[InstrumentationService.Timer.AVG_OWN]);
   }
 
   @Test
@@ -240,34 +244,34 @@ public Long getValue() {
 
     InstrumentationService.Sampler sampler = new InstrumentationService.Sampler();
     sampler.init(4, var);
-    Assert.assertEquals(sampler.getRate(), 0f, 0.0001);
+    assertEquals(sampler.getRate(), 0f, 0.0001);
     sampler.sample();
-    Assert.assertEquals(sampler.getRate(), 0f, 0.0001);
+    assertEquals(sampler.getRate(), 0f, 0.0001);
     value[0] = 1;
     sampler.sample();
-    Assert.assertEquals(sampler.getRate(), (0d + 1) / 2, 0.0001);
+    assertEquals(sampler.getRate(), (0d + 1) / 2, 0.0001);
     value[0] = 2;
     sampler.sample();
-    Assert.assertEquals(sampler.getRate(), (0d + 1 + 2) / 3, 0.0001);
+    assertEquals(sampler.getRate(), (0d + 1 + 2) / 3, 0.0001);
     value[0] = 3;
     sampler.sample();
-    Assert.assertEquals(sampler.getRate(), (0d + 1 + 2 + 3) / 4, 0.0001);
+    assertEquals(sampler.getRate(), (0d + 1 + 2 + 3) / 4, 0.0001);
     value[0] = 4;
     sampler.sample();
-    Assert.assertEquals(sampler.getRate(), (4d + 1 + 2 + 3) / 4, 0.0001);
+    assertEquals(sampler.getRate(), (4d + 1 + 2 + 3) / 4, 0.0001);
 
     JSONObject json = (JSONObject) new JSONParser().parse(sampler.toJSONString());
-    Assert.assertEquals(json.size(), 2);
-    Assert.assertEquals(json.get("sampler"), sampler.getRate());
-    Assert.assertEquals(json.get("size"), 4L);
+    assertEquals(json.size(), 2);
+    assertEquals(json.get("sampler"), sampler.getRate());
+    assertEquals(json.get("size"), 4L);
 
     StringWriter writer = new StringWriter();
     sampler.writeJSONString(writer);
     writer.close();
     json = (JSONObject) new JSONParser().parse(writer.toString());
-    Assert.assertEquals(json.size(), 2);
-    Assert.assertEquals(json.get("sampler"), sampler.getRate());
-    Assert.assertEquals(json.get("size"), 4L);
+    assertEquals(json.size(), 2);
+    assertEquals(json.get("sampler"), sampler.getRate());
+    assertEquals(json.get("size"), 4L);
   }
 
   @Test
@@ -283,15 +287,15 @@ public String getValue() {
     };
 
     JSONObject json = (JSONObject) new JSONParser().parse(variableHolder.toJSONString());
-    Assert.assertEquals(json.size(), 1);
-    Assert.assertEquals(json.get("value"), "foo");
+    assertEquals(json.size(), 1);
+    assertEquals(json.get("value"), "foo");
 
     StringWriter writer = new StringWriter();
     variableHolder.writeJSONString(writer);
     writer.close();
     json = (JSONObject) new JSONParser().parse(writer.toString());
-    Assert.assertEquals(json.size(), 1);
-    Assert.assertEquals(json.get("value"), "foo");
+    assertEquals(json.size(), 1);
+    assertEquals(json.get("value"), "foo");
   }
 
   @Test
@@ -306,7 +310,7 @@ public void service() throws Exception {
     server.init();
 
     Instrumentation instrumentation = server.get(Instrumentation.class);
-    Assert.assertNotNull(instrumentation);
+    assertNotNull(instrumentation);
     instrumentation.incr("g", "c", 1);
     instrumentation.incr("g", "c", 2);
     instrumentation.incr("g", "c1", 2);
@@ -339,27 +343,27 @@ public Long getValue() {
     instrumentation.addSampler("g", "s", 10, varToSample);
 
     Map<String, ?> snapshot = instrumentation.getSnapshot();
-    Assert.assertNotNull(snapshot.get("os-env"));
-    Assert.assertNotNull(snapshot.get("sys-props"));
-    Assert.assertNotNull(snapshot.get("jvm"));
-    Assert.assertNotNull(snapshot.get("counters"));
-    Assert.assertNotNull(snapshot.get("timers"));
-    Assert.assertNotNull(snapshot.get("variables"));
-    Assert.assertNotNull(snapshot.get("samplers"));
-    Assert.assertNotNull(((Map<String, ?>) snapshot.get("os-env")).get("PATH"));
-    Assert.assertNotNull(((Map<String, ?>) snapshot.get("sys-props")).get("java.version"));
-    Assert.assertNotNull(((Map<String, ?>) snapshot.get("jvm")).get("free.memory"));
-    Assert.assertNotNull(((Map<String, ?>) snapshot.get("jvm")).get("max.memory"));
-    Assert.assertNotNull(((Map<String, ?>) snapshot.get("jvm")).get("total.memory"));
-    Assert.assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("counters")).get("g"));
-    Assert.assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("timers")).get("g"));
-    Assert.assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("variables")).get("g"));
-    Assert.assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("samplers")).get("g"));
-    Assert.assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("counters")).get("g").get("c"));
-    Assert.assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("counters")).get("g").get("c1"));
-    Assert.assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("timers")).get("g").get("t"));
-    Assert.assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("variables")).get("g").get("v"));
-    Assert.assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("samplers")).get("g").get("s"));
+    assertNotNull(snapshot.get("os-env"));
+    assertNotNull(snapshot.get("sys-props"));
+    assertNotNull(snapshot.get("jvm"));
+    assertNotNull(snapshot.get("counters"));
+    assertNotNull(snapshot.get("timers"));
+    assertNotNull(snapshot.get("variables"));
+    assertNotNull(snapshot.get("samplers"));
+    assertNotNull(((Map<String, ?>) snapshot.get("os-env")).get("PATH"));
+    assertNotNull(((Map<String, ?>) snapshot.get("sys-props")).get("java.version"));
+    assertNotNull(((Map<String, ?>) snapshot.get("jvm")).get("free.memory"));
+    assertNotNull(((Map<String, ?>) snapshot.get("jvm")).get("max.memory"));
+    assertNotNull(((Map<String, ?>) snapshot.get("jvm")).get("total.memory"));
+    assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("counters")).get("g"));
+    assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("timers")).get("g"));
+    assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("variables")).get("g"));
+    assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("samplers")).get("g"));
+    assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("counters")).get("g").get("c"));
+    assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("counters")).get("g").get("c1"));
+    assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("timers")).get("g").get("t"));
+    assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("variables")).get("g").get("v"));
+    assertNotNull(((Map<String, Map<String, ?>>) snapshot.get("samplers")).get("g").get("s"));
 
     StringWriter writer = new StringWriter();
     JSONObject.writeJSONString(snapshot, writer);
@@ -392,12 +396,12 @@ public Long getValue() {
     sleep(2000);
 
     int i = count.get();
-    Assert.assertTrue(i > 0);
+    assertTrue(i > 0);
 
     Map<String, Map<String, ?>> snapshot = instrumentation.getSnapshot();
     Map<String, Map<String, ?>> samplers = (Map<String, Map<String, ?>>) snapshot.get("samplers");
     InstrumentationService.Sampler sampler = (InstrumentationService.Sampler) samplers.get("g").get("s");
-    Assert.assertTrue(sampler.getRate() > 0);
+    assertTrue(sampler.getRate() > 0);
 
     server.destroy();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/scheduler/TestSchedulerService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/scheduler/TestSchedulerService.java
index 5e4a982b63..f8abb48e7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/scheduler/TestSchedulerService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/scheduler/TestSchedulerService.java
@@ -18,7 +18,10 @@
 
 package org.apache.hadoop.lib.service.scheduler;
 
-import junit.framework.Assert;
+import static org.junit.Assert.assertNotNull;
+
+import java.util.Arrays;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.lib.server.Server;
 import org.apache.hadoop.lib.service.Scheduler;
@@ -29,8 +32,6 @@
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
 
-import java.util.Arrays;
-
 public class TestSchedulerService extends HTestCase {
 
   @Test
@@ -42,7 +43,7 @@ public void service() throws Exception {
                                                          SchedulerService.class.getName())));
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
-    Assert.assertNotNull(server.get(Scheduler.class));
+    assertNotNull(server.get(Scheduler.class));
     server.destroy();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java
index bd2f0ac07b..167690902a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java
@@ -17,14 +17,14 @@
  */
 package org.apache.hadoop.lib.service.security;
 
-import org.apache.hadoop.security.GroupMappingServiceProvider;
-import org.apache.hadoop.test.HadoopUsersConfTestHelper;
-
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.hadoop.security.GroupMappingServiceProvider;
+import org.apache.hadoop.test.HadoopUsersConfTestHelper;
+
 public class DummyGroupMapping implements GroupMappingServiceProvider {
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestGroupsService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestGroupsService.java
index bb4a29cae2..445192b66f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestGroupsService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestGroupsService.java
@@ -18,7 +18,12 @@
 
 package org.apache.hadoop.lib.service.security;
 
-import junit.framework.Assert;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNotSame;
+
+import java.util.Arrays;
+import java.util.List;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.lib.server.Server;
 import org.apache.hadoop.lib.service.Groups;
@@ -28,9 +33,6 @@
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
 
-import java.util.Arrays;
-import java.util.List;
-
 public class TestGroupsService extends HTestCase {
 
   @Test
@@ -42,9 +44,9 @@ public void service() throws Exception {
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     Groups groups = server.get(Groups.class);
-    Assert.assertNotNull(groups);
+    assertNotNull(groups);
     List<String> g = groups.getGroups(System.getProperty("user.name"));
-    Assert.assertNotSame(g.size(), 0);
+    assertNotSame(g.size(), 0);
     server.destroy();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestProxyUserService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestProxyUserService.java
index 3d4115e8c7..294f5e80b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestProxyUserService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestProxyUserService.java
@@ -18,7 +18,12 @@
 
 package org.apache.hadoop.lib.service.security;
 
-import junit.framework.Assert;
+import static org.junit.Assert.assertNotNull;
+
+import java.security.AccessControlException;
+import java.util.Arrays;
+import java.util.List;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.lib.server.Server;
 import org.apache.hadoop.lib.server.ServiceException;
@@ -31,10 +36,6 @@
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
 
-import java.security.AccessControlException;
-import java.util.Arrays;
-import java.util.List;
-
 public class TestProxyUserService extends HTestCase {
 
   @Test
@@ -47,7 +48,7 @@ public void service() throws Exception {
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     ProxyUser proxyUser = server.get(ProxyUser.class);
-    Assert.assertNotNull(proxyUser);
+    assertNotNull(proxyUser);
     server.destroy();
   }
 
@@ -103,7 +104,7 @@ public void validateAnyHostAnyUser() throws Exception {
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     ProxyUser proxyUser = server.get(ProxyUser.class);
-    Assert.assertNotNull(proxyUser);
+    assertNotNull(proxyUser);
     proxyUser.validate("foo", "localhost", "bar");
     server.destroy();
   }
@@ -120,7 +121,7 @@ public void invalidProxyUser() throws Exception {
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     ProxyUser proxyUser = server.get(ProxyUser.class);
-    Assert.assertNotNull(proxyUser);
+    assertNotNull(proxyUser);
     proxyUser.validate("bar", "localhost", "foo");
     server.destroy();
   }
@@ -137,7 +138,7 @@ public void validateHost() throws Exception {
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     ProxyUser proxyUser = server.get(ProxyUser.class);
-    Assert.assertNotNull(proxyUser);
+    assertNotNull(proxyUser);
     proxyUser.validate("foo", "localhost", "bar");
     server.destroy();
   }
@@ -166,7 +167,7 @@ public void validateGroup() throws Exception {
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     ProxyUser proxyUser = server.get(ProxyUser.class);
-    Assert.assertNotNull(proxyUser);
+    assertNotNull(proxyUser);
     proxyUser.validate("foo", "localhost", System.getProperty("user.name"));
     server.destroy();
   }
@@ -184,7 +185,7 @@ public void unknownHost() throws Exception {
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     ProxyUser proxyUser = server.get(ProxyUser.class);
-    Assert.assertNotNull(proxyUser);
+    assertNotNull(proxyUser);
     proxyUser.validate("foo", "unknownhost.bar.foo", "bar");
     server.destroy();
   }
@@ -201,7 +202,7 @@ public void invalidHost() throws Exception {
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     ProxyUser proxyUser = server.get(ProxyUser.class);
-    Assert.assertNotNull(proxyUser);
+    assertNotNull(proxyUser);
     proxyUser.validate("foo", "www.yahoo.com", "bar");
     server.destroy();
   }
@@ -218,7 +219,7 @@ public void invalidGroup() throws Exception {
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     ProxyUser proxyUser = server.get(ProxyUser.class);
-    Assert.assertNotNull(proxyUser);
+    assertNotNull(proxyUser);
     proxyUser.validate("foo", "localhost", System.getProperty("user.name"));
     server.destroy();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java
index f3a2a5ad65..44da0afd70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java
@@ -18,18 +18,21 @@
 
 package org.apache.hadoop.lib.servlet;
 
-import junit.framework.Assert;
-import org.apache.hadoop.test.HTestCase;
-import org.junit.Test;
-import org.mockito.Mockito;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
 import javax.servlet.ServletException;
 import javax.servlet.ServletRequest;
 import javax.servlet.ServletResponse;
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.hadoop.test.HTestCase;
+import org.junit.Test;
+import org.mockito.Mockito;
 
 public class TestHostnameFilter extends HTestCase {
 
@@ -47,17 +50,17 @@ public void hostname() throws Exception {
       @Override
       public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
         throws IOException, ServletException {
-        Assert.assertTrue(HostnameFilter.get().contains("localhost"));
+        assertTrue(HostnameFilter.get().contains("localhost"));
         invoked.set(true);
       }
     };
 
     Filter filter = new HostnameFilter();
     filter.init(null);
-    Assert.assertNull(HostnameFilter.get());
+    assertNull(HostnameFilter.get());
     filter.doFilter(request, response, chain);
-    Assert.assertTrue(invoked.get());
-    Assert.assertNull(HostnameFilter.get());
+    assertTrue(invoked.get());
+    assertNull(HostnameFilter.get());
     filter.destroy();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestMDCFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestMDCFilter.java
index 216af5fa50..911cc0ad23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestMDCFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestMDCFilter.java
@@ -18,11 +18,13 @@
 
 package org.apache.hadoop.lib.servlet;
 
-import junit.framework.Assert;
-import org.apache.hadoop.test.HTestCase;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.slf4j.MDC;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.security.Principal;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
@@ -30,9 +32,11 @@
 import javax.servlet.ServletRequest;
 import javax.servlet.ServletResponse;
 import javax.servlet.http.HttpServletRequest;
-import java.io.IOException;
-import java.security.Principal;
-import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.hadoop.test.HTestCase;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.slf4j.MDC;
 
 public class TestMDCFilter extends HTestCase {
 
@@ -52,10 +56,10 @@ public void mdc() throws Exception {
       @Override
       public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
         throws IOException, ServletException {
-        Assert.assertEquals(MDC.get("hostname"), null);
-        Assert.assertEquals(MDC.get("user"), null);
-        Assert.assertEquals(MDC.get("method"), "METHOD");
-        Assert.assertEquals(MDC.get("path"), "/pathinfo");
+        assertEquals(MDC.get("hostname"), null);
+        assertEquals(MDC.get("user"), null);
+        assertEquals(MDC.get("method"), "METHOD");
+        assertEquals(MDC.get("path"), "/pathinfo");
         invoked.set(true);
       }
     };
@@ -65,11 +69,11 @@ public void doFilter(ServletRequest servletRequest, ServletResponse servletRespo
 
     filter.init(null);
     filter.doFilter(request, response, chain);
-    Assert.assertTrue(invoked.get());
-    Assert.assertNull(MDC.get("hostname"));
-    Assert.assertNull(MDC.get("user"));
-    Assert.assertNull(MDC.get("method"));
-    Assert.assertNull(MDC.get("path"));
+    assertTrue(invoked.get());
+    assertNull(MDC.get("hostname"));
+    assertNull(MDC.get("user"));
+    assertNull(MDC.get("method"));
+    assertNull(MDC.get("path"));
 
     Mockito.when(request.getUserPrincipal()).thenReturn(new Principal() {
       @Override
@@ -83,15 +87,15 @@ public String getName() {
       @Override
       public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
         throws IOException, ServletException {
-        Assert.assertEquals(MDC.get("hostname"), null);
-        Assert.assertEquals(MDC.get("user"), "name");
-        Assert.assertEquals(MDC.get("method"), "METHOD");
-        Assert.assertEquals(MDC.get("path"), "/pathinfo");
+        assertEquals(MDC.get("hostname"), null);
+        assertEquals(MDC.get("user"), "name");
+        assertEquals(MDC.get("method"), "METHOD");
+        assertEquals(MDC.get("path"), "/pathinfo");
         invoked.set(true);
       }
     };
     filter.doFilter(request, response, chain);
-    Assert.assertTrue(invoked.get());
+    assertTrue(invoked.get());
 
     HostnameFilter.HOSTNAME_TL.set("HOST");
 
@@ -100,15 +104,15 @@ public void doFilter(ServletRequest servletRequest, ServletResponse servletRespo
       @Override
       public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
         throws IOException, ServletException {
-        Assert.assertEquals(MDC.get("hostname"), "HOST");
-        Assert.assertEquals(MDC.get("user"), "name");
-
Assert.assertEquals(MDC.get("method"), "METHOD"); - Assert.assertEquals(MDC.get("path"), "/pathinfo"); + assertEquals(MDC.get("hostname"), "HOST"); + assertEquals(MDC.get("user"), "name"); + assertEquals(MDC.get("method"), "METHOD"); + assertEquals(MDC.get("path"), "/pathinfo"); invoked.set(true); } }; filter.doFilter(request, response, chain); - Assert.assertTrue(invoked.get()); + assertTrue(invoked.get()); HostnameFilter.HOSTNAME_TL.remove(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java index 380fa3e081..0234266e4a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java @@ -18,7 +18,8 @@ package org.apache.hadoop.lib.servlet; -import junit.framework.Assert; +import static org.junit.Assert.assertEquals; + import org.apache.hadoop.lib.server.Server; import org.apache.hadoop.test.HTestCase; import org.apache.hadoop.test.TestDir; @@ -35,10 +36,10 @@ public void getHomeDirNotDef() { @Test public void getHomeDir() { System.setProperty("TestServerWebApp0.home.dir", "/tmp"); - Assert.assertEquals(ServerWebApp.getHomeDir("TestServerWebApp0"), "/tmp"); - Assert.assertEquals(ServerWebApp.getDir("TestServerWebApp0", ".log.dir", "/tmp/log"), "/tmp/log"); + assertEquals(ServerWebApp.getHomeDir("TestServerWebApp0"), "/tmp"); + assertEquals(ServerWebApp.getDir("TestServerWebApp0", ".log.dir", "/tmp/log"), "/tmp/log"); System.setProperty("TestServerWebApp0.log.dir", "/tmplog"); - Assert.assertEquals(ServerWebApp.getDir("TestServerWebApp0", ".log.dir", "/tmp/log"), "/tmplog"); + assertEquals(ServerWebApp.getDir("TestServerWebApp0", ".log.dir", "/tmp/log"), "/tmplog"); } @Test @@ -52,11 +53,11 @@ public void lifecycle() throws Exception { ServerWebApp server = new ServerWebApp("TestServerWebApp1") { }; - Assert.assertEquals(server.getStatus(), Server.Status.UNDEF); + assertEquals(server.getStatus(), Server.Status.UNDEF); server.contextInitialized(null); - Assert.assertEquals(server.getStatus(), Server.Status.NORMAL); + assertEquals(server.getStatus(), Server.Status.NORMAL); server.contextDestroyed(null); - Assert.assertEquals(server.getStatus(), Server.Status.SHUTDOWN); + assertEquals(server.getStatus(), Server.Status.SHUTDOWN); } @Test(expected = RuntimeException.class) diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestCheck.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestCheck.java index 532ad369de..877dcd46bc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestCheck.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestCheck.java @@ -19,18 +19,19 @@ package org.apache.hadoop.lib.util; -import junit.framework.Assert; -import org.apache.hadoop.test.HTestCase; -import org.junit.Test; +import static org.junit.Assert.assertEquals; import java.util.ArrayList; import java.util.Arrays; +import org.apache.hadoop.test.HTestCase; +import org.junit.Test; + public class TestCheck extends HTestCase { @Test public void notNullNotNull() { - Assert.assertEquals(Check.notNull("value", "name"), "value"); + assertEquals(Check.notNull("value", "name"), "value"); } @Test(expected = 
IllegalArgumentException.class) @@ -79,7 +80,7 @@ public void notEmptyElementsEmptyElements() { @Test public void notEmptyNotEmtpy() { - Assert.assertEquals(Check.notEmpty("value", "name"), "value"); + assertEquals(Check.notEmpty("value", "name"), "value"); } @Test(expected = IllegalArgumentException.class) @@ -94,10 +95,10 @@ public void notEmptyEmpty() { @Test public void validIdentifierValid() throws Exception { - Assert.assertEquals(Check.validIdentifier("a", 1, ""), "a"); - Assert.assertEquals(Check.validIdentifier("a1", 2, ""), "a1"); - Assert.assertEquals(Check.validIdentifier("a_", 3, ""), "a_"); - Assert.assertEquals(Check.validIdentifier("_", 1, ""), "_"); + assertEquals(Check.validIdentifier("a", 1, ""), "a"); + assertEquals(Check.validIdentifier("a1", 2, ""), "a1"); + assertEquals(Check.validIdentifier("a_", 3, ""), "a_"); + assertEquals(Check.validIdentifier("_", 1, ""), "_"); } @Test(expected = IllegalArgumentException.class) @@ -117,7 +118,7 @@ public void validIdentifierInvalid3() throws Exception { @Test public void checkGTZeroGreater() { - Assert.assertEquals(Check.gt0(120, "test"), 120); + assertEquals(Check.gt0(120, "test"), 120); } @Test(expected = IllegalArgumentException.class) @@ -132,8 +133,8 @@ public void checkGTZeroLessThanZero() { @Test public void checkGEZero() { - Assert.assertEquals(Check.ge0(120, "test"), 120); - Assert.assertEquals(Check.ge0(0, "test"), 0); + assertEquals(Check.ge0(120, "test"), 120); + assertEquals(Check.ge0(0, "test"), 0); } @Test(expected = IllegalArgumentException.class)
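The TestCheck hunks above lean throughout on JUnit 4's declarative expected-exception support instead of try/fail/catch blocks. A hedged sketch of the pattern, with a hypothetical precondition helper standing in for the Check class:

import org.junit.Test;

public class ExpectedExceptionExample {

  // Passes only if the body throws IllegalArgumentException; completing
  // normally, or throwing anything else, fails the test.
  @Test(expected = IllegalArgumentException.class)
  public void rejectsEmptyValue() {
    checkNotEmpty("");
  }

  // Hypothetical helper in the spirit of Check.notEmpty().
  private static String checkNotEmpty(String value) {
    if (value == null || value.isEmpty()) {
      throw new IllegalArgumentException("value must not be empty");
    }
    return value;
  }
}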
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestConfigurationUtils.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestConfigurationUtils.java index 48b5f9155f..925edc5408 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestConfigurationUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestConfigurationUtils.java @@ -18,27 +18,29 @@ package org.apache.hadoop.lib.util; -import junit.framework.Assert; -import org.apache.hadoop.conf.Configuration; -import org.junit.Test; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; +import org.apache.hadoop.conf.Configuration; +import org.junit.Test; + public class TestConfigurationUtils { @Test public void constructors() throws Exception { Configuration conf = new Configuration(false); - Assert.assertEquals(conf.size(), 0); + assertEquals(conf.size(), 0); byte[] bytes = "<configuration><property><name>a</name><value>A</value></property></configuration>".getBytes(); InputStream is = new ByteArrayInputStream(bytes); conf = new Configuration(false); ConfigurationUtils.load(conf, is); - Assert.assertEquals(conf.size(), 1); - Assert.assertEquals(conf.get("a"), "A"); + assertEquals(conf.size(), 1); + assertEquals(conf.get("a"), "A"); } @@ -62,9 +64,9 @@ public void copy() throws Exception { ConfigurationUtils.copy(srcConf, targetConf); - Assert.assertEquals("valueFromSource", targetConf.get("testParameter1")); - Assert.assertEquals("valueFromSource", targetConf.get("testParameter2")); - Assert.assertEquals("valueFromTarget", targetConf.get("testParameter3")); + assertEquals("valueFromSource", targetConf.get("testParameter1")); + assertEquals("valueFromSource", targetConf.get("testParameter2")); + assertEquals("valueFromTarget", targetConf.get("testParameter3")); } @Test @@ -80,13 +82,13 @@ public void injectDefaults() throws Exception { ConfigurationUtils.injectDefaults(srcConf, targetConf); - Assert.assertEquals("valueFromSource", targetConf.get("testParameter1")); - Assert.assertEquals("originalValueFromTarget", targetConf.get("testParameter2")); - Assert.assertEquals("originalValueFromTarget", targetConf.get("testParameter3")); + assertEquals("valueFromSource", targetConf.get("testParameter1")); + assertEquals("originalValueFromTarget", targetConf.get("testParameter2")); + assertEquals("originalValueFromTarget", targetConf.get("testParameter3")); - Assert.assertEquals("valueFromSource", srcConf.get("testParameter1")); - Assert.assertEquals("valueFromSource", srcConf.get("testParameter2")); - Assert.assertNull(srcConf.get("testParameter3")); + assertEquals("valueFromSource", srcConf.get("testParameter1")); + assertEquals("valueFromSource", srcConf.get("testParameter2")); + assertNull(srcConf.get("testParameter3")); } @@ -95,11 +97,11 @@ public void resolve() { Configuration conf = new Configuration(false); conf.set("a", "A"); conf.set("b", "${a}"); - Assert.assertEquals(conf.getRaw("a"), "A"); - Assert.assertEquals(conf.getRaw("b"), "${a}"); + assertEquals(conf.getRaw("a"), "A"); + assertEquals(conf.getRaw("b"), "${a}"); conf = ConfigurationUtils.resolve(conf); - Assert.assertEquals(conf.getRaw("a"), "A"); - Assert.assertEquals(conf.getRaw("b"), "A"); + assertEquals(conf.getRaw("a"), "A"); + assertEquals(conf.getRaw("b"), "A"); } @Test @@ -110,16 +112,16 @@ public void testVarResolutionAndSysProps() { conf.set("b", "${a}"); conf.set("c", "${user.name}"); conf.set("d", "${aaa}"); - Assert.assertEquals(conf.getRaw("a"), "A"); - Assert.assertEquals(conf.getRaw("b"), "${a}"); - Assert.assertEquals(conf.getRaw("c"), "${user.name}"); - Assert.assertEquals(conf.get("a"), "A"); - Assert.assertEquals(conf.get("b"), "A"); - Assert.assertEquals(conf.get("c"), userName); - Assert.assertEquals(conf.get("d"), "${aaa}"); + assertEquals(conf.getRaw("a"), "A"); + assertEquals(conf.getRaw("b"), "${a}"); + assertEquals(conf.getRaw("c"), "${user.name}"); + assertEquals(conf.get("a"), "A"); + assertEquals(conf.get("b"), "A"); + assertEquals(conf.get("c"), userName); + assertEquals(conf.get("d"), "${aaa}"); conf.set("user.name", "foo"); - Assert.assertEquals(conf.get("user.name"), "foo"); + assertEquals(conf.get("user.name"), "foo"); } }
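The resolve() and testVarResolutionAndSysProps() hunks above depend on Configuration's variable expansion: get() resolves ${...} references against other keys and Java system properties, while getRaw() returns the stored value untouched, and unresolvable references pass through literally (exactly what the ${aaa} assertion checks). A standalone sketch of that behaviour (the class name is hypothetical):

import org.apache.hadoop.conf.Configuration;

public class ConfigVarResolutionDemo {

  public static void main(String[] args) {
    // false: do not load the default *-site.xml resources.
    Configuration conf = new Configuration(false);
    conf.set("a", "A");
    conf.set("b", "${a}");
    System.out.println(conf.getRaw("b")); // ${a}  - stored form
    System.out.println(conf.get("b"));    // A     - resolved form
    conf.set("d", "${aaa}");
    System.out.println(conf.get("d"));    // ${aaa} - undefined, passes through
  }
}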
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestInputStreamEntity.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestInputStreamEntity.java index c3e0200d6e..0fa9409306 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestInputStreamEntity.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestInputStreamEntity.java @@ -18,13 +18,14 @@ package org.apache.hadoop.lib.wsrs; -import junit.framework.Assert; -import org.junit.Test; +import static org.junit.Assert.assertEquals; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.InputStream; +import org.junit.Test; + public class TestInputStreamEntity { @Test @@ -34,14 +35,14 @@ public void test() throws Exception { InputStreamEntity i = new InputStreamEntity(is); i.write(baos); baos.close(); - Assert.assertEquals(new String(baos.toByteArray()), "abc"); + assertEquals(new String(baos.toByteArray()), "abc"); is = new ByteArrayInputStream("abc".getBytes()); baos = new ByteArrayOutputStream(); i = new InputStreamEntity(is, 1, 1); i.write(baos); baos.close(); - Assert.assertEquals(baos.toByteArray()[0], 'b'); + assertEquals(baos.toByteArray()[0], 'b'); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONMapProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONMapProvider.java index afb07572e7..099378032d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONMapProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONMapProvider.java @@ -18,28 +18,31 @@ package org.apache.hadoop.lib.wsrs; -import junit.framework.Assert; -import org.json.simple.JSONObject; -import org.junit.Test; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import java.io.ByteArrayOutputStream; import java.util.Map; +import org.json.simple.JSONObject; +import org.junit.Test; + public class TestJSONMapProvider { @Test @SuppressWarnings("unchecked") public void test() throws Exception { JSONMapProvider p = new JSONMapProvider(); - Assert.assertTrue(p.isWriteable(Map.class, null, null, null)); - Assert.assertFalse(p.isWriteable(this.getClass(), null, null, null)); - Assert.assertEquals(p.getSize(null, null, null, null, null), -1); + assertTrue(p.isWriteable(Map.class, null, null, null)); + assertFalse(p.isWriteable(this.getClass(), null, null, null)); + assertEquals(p.getSize(null, null, null, null, null), -1); ByteArrayOutputStream baos = new ByteArrayOutputStream(); JSONObject json = new JSONObject(); json.put("a", "A"); p.writeTo(json, JSONObject.class, null, null, null, null, baos); baos.close(); - Assert.assertEquals(new String(baos.toByteArray()).trim(), "{\"a\":\"A\"}"); + assertEquals(new String(baos.toByteArray()).trim(), "{\"a\":\"A\"}"); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONProvider.java index a9ac9a2d74..5f747500ed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONProvider.java @@ -18,27 +18,30 @@ package org.apache.hadoop.lib.wsrs; -import junit.framework.Assert; -import org.json.simple.JSONObject; -import org.junit.Test; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import java.io.ByteArrayOutputStream; +import org.json.simple.JSONObject; +import org.junit.Test; + public class TestJSONProvider { @Test @SuppressWarnings("unchecked") public void test() throws Exception { JSONProvider p = new JSONProvider(); - Assert.assertTrue(p.isWriteable(JSONObject.class, null, null, null)); - Assert.assertFalse(p.isWriteable(this.getClass(), null, null, null)); - Assert.assertEquals(p.getSize(null, null, null, null, null), -1); + assertTrue(p.isWriteable(JSONObject.class, null, null, null)); + assertFalse(p.isWriteable(this.getClass(), null, null, null)); + assertEquals(p.getSize(null, null, null, null, null), -1); ByteArrayOutputStream baos = new ByteArrayOutputStream(); JSONObject json = new JSONObject(); json.put("a", "A"); p.writeTo(json, JSONObject.class, null, null, null, null, baos); baos.close(); - Assert.assertEquals(new
String(baos.toByteArray()).trim(), "{\"a\":\"A\"}"); + assertEquals(new String(baos.toByteArray()).trim(), "{\"a\":\"A\"}"); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java index e2376879d7..92719db7a7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java @@ -18,41 +18,43 @@ package org.apache.hadoop.lib.wsrs; -import junit.framework.Assert; -import org.junit.Test; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; import java.util.regex.Pattern; +import org.junit.Test; + public class TestParam { private <T> void test(Param<T> param, String name, String domain, T defaultValue, T validValue, String invalidStrValue, String outOfRangeValue) throws Exception { - Assert.assertEquals(name, param.getName()); - Assert.assertEquals(domain, param.getDomain()); - Assert.assertEquals(defaultValue, param.value()); - Assert.assertEquals(defaultValue, param.parseParam("")); - Assert.assertEquals(defaultValue, param.parseParam(null)); - Assert.assertEquals(validValue, param.parseParam(validValue.toString())); + assertEquals(name, param.getName()); + assertEquals(domain, param.getDomain()); + assertEquals(defaultValue, param.value()); + assertEquals(defaultValue, param.parseParam("")); + assertEquals(defaultValue, param.parseParam(null)); + assertEquals(validValue, param.parseParam(validValue.toString())); if (invalidStrValue != null) { try { param.parseParam(invalidStrValue); - Assert.fail(); + fail(); } catch (IllegalArgumentException ex) { //NOP } catch (Exception ex) { - Assert.fail(); + fail(); } } if (outOfRangeValue != null) { try { param.parseParam(outOfRangeValue); - Assert.fail(); + fail(); } catch (IllegalArgumentException ex) { //NOP } catch (Exception ex) { - Assert.fail(); + fail(); } } } @@ -81,7 +83,7 @@ public void testShort() throws Exception { param = new ShortParam("S", (short) 1, 8) { }; - Assert.assertEquals(new Short((short)01777), param.parse("01777")); + assertEquals(new Short((short)01777), param.parse("01777")); } @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java index 72d79a9392..2e5c646f37 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java @@ -18,16 +18,20 @@ package org.apache.hadoop.lib.wsrs; -import com.sun.jersey.api.core.HttpContext; -import com.sun.jersey.api.core.HttpRequestContext; -import com.sun.jersey.core.spi.component.ComponentScope; -import junit.framework.Assert; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import java.security.Principal; + +import javax.ws.rs.core.MultivaluedMap; + import org.junit.Test; import org.mockito.Mockito; import org.slf4j.MDC; -import javax.ws.rs.core.MultivaluedMap; -import java.security.Principal; +import com.sun.jersey.api.core.HttpContext; +import com.sun.jersey.api.core.HttpRequestContext; +import com.sun.jersey.core.spi.component.ComponentScope; public class TestUserProvider { @@ -43,8 +47,8 @@ public void
noUser() { HttpContext context = Mockito.mock(HttpContext.class); Mockito.when(context.getRequest()).thenReturn(request); UserProvider up = new UserProvider(); - Assert.assertNull(up.getValue(context)); - Assert.assertNull(MDC.get("user")); + assertNull(up.getValue(context)); + assertNull(MDC.get("user")); } @Test @@ -59,8 +63,8 @@ public void queryStringUser() { HttpContext context = Mockito.mock(HttpContext.class); Mockito.when(context.getRequest()).thenReturn(request); UserProvider up = new UserProvider(); - Assert.assertEquals(up.getValue(context).getName(), "foo"); - Assert.assertEquals(MDC.get("user"), "foo"); + assertEquals(up.getValue(context).getName(), "foo"); + assertEquals(MDC.get("user"), "foo"); } @Test @@ -77,15 +81,15 @@ public String getName() { HttpContext context = Mockito.mock(HttpContext.class); Mockito.when(context.getRequest()).thenReturn(request); UserProvider up = new UserProvider(); - Assert.assertEquals(up.getValue(context).getName(), "bar"); - Assert.assertEquals(MDC.get("user"), "bar"); + assertEquals(up.getValue(context).getName(), "bar"); + assertEquals(MDC.get("user"), "bar"); } @Test public void getters() { UserProvider up = new UserProvider(); - Assert.assertEquals(up.getScope(), ComponentScope.PerRequest); - Assert.assertEquals(up.getInjectable(null, null, Principal.class), up); - Assert.assertNull(up.getInjectable(null, null, String.class)); + assertEquals(up.getScope(), ComponentScope.PerRequest); + assertEquals(up.getInjectable(null, null, Principal.class), up); + assertNull(up.getInjectable(null, null, String.class)); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HTestCase.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HTestCase.java index 316fc9f7a2..38956994d5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HTestCase.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HTestCase.java @@ -17,14 +17,14 @@ */ package org.apache.hadoop.test; -import junit.framework.Assert; +import static org.junit.Assert.fail; + +import java.text.MessageFormat; import org.apache.hadoop.util.Time; import org.junit.Rule; import org.junit.rules.MethodRule; -import java.text.MessageFormat; - public abstract class HTestCase { public static final String TEST_WAITFOR_RATIO_PROP = "test.waitfor.ratio"; @@ -161,7 +161,7 @@ protected long waitFor(int timeout, boolean failIfTimeout, Predicate predicate) } if (!eval) { if (failIfTimeout) { - Assert.fail(MessageFormat.format("Waiting timed out after [{0}] msec", timeout)); + fail(MessageFormat.format("Waiting timed out after [{0}] msec", timeout)); } else { System.out.println(MessageFormat.format("Waiting timed out after [{0}] msec", timeout)); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HadoopUsersConfTestHelper.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HadoopUsersConfTestHelper.java index f27d0efaae..57af33664e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HadoopUsersConfTestHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HadoopUsersConfTestHelper.java @@ -17,12 +17,12 @@ */ package org.apache.hadoop.test; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.security.UserGroupInformation; - import java.util.ArrayList; import java.util.List; +import 
org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; + /** * Helper to configure FileSystemAccess user/group and proxyuser * configuration for testing using Java System properties. diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestDirHelper.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestDirHelper.java index c3f3d53c38..3368c79c7a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestDirHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestDirHelper.java @@ -17,16 +17,16 @@ */ package org.apache.hadoop.test; -import org.junit.Test; -import org.junit.rules.MethodRule; -import org.junit.runners.model.FrameworkMethod; -import org.junit.runners.model.Statement; - import java.io.File; import java.io.IOException; import java.text.MessageFormat; import java.util.concurrent.atomic.AtomicInteger; +import org.junit.Test; +import org.junit.rules.MethodRule; +import org.junit.runners.model.FrameworkMethod; +import org.junit.runners.model.Statement; + public class TestDirHelper implements MethodRule { @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestExceptionHelper.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestExceptionHelper.java index 8411db4754..e3af643513 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestExceptionHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestExceptionHelper.java @@ -17,14 +17,15 @@ */ package org.apache.hadoop.test; -import junit.framework.Assert; +import static org.junit.Assert.fail; + +import java.util.regex.Pattern; + import org.junit.Test; import org.junit.rules.MethodRule; import org.junit.runners.model.FrameworkMethod; import org.junit.runners.model.Statement; -import java.util.regex.Pattern; - public class TestExceptionHelper implements MethodRule { @Test @@ -41,7 +42,7 @@ public void evaluate() throws Throwable { statement.evaluate(); if (testExceptionAnnotation != null) { Class<? extends Throwable> klass = testExceptionAnnotation.exception(); - Assert.fail("Expected Exception: " + klass.getSimpleName()); + fail("Expected Exception: " + klass.getSimpleName()); } } catch (Throwable ex) { if (testExceptionAnnotation != null) { @@ -50,10 +51,10 @@ public void evaluate() throws Throwable { String regExp = testExceptionAnnotation.msgRegExp(); Pattern pattern = Pattern.compile(regExp); if (!pattern.matcher(ex.getMessage()).find()) { - Assert.fail("Expected Exception Message pattern: " + regExp + " got message: " + ex.getMessage()); + fail("Expected Exception Message pattern: " + regExp + " got message: " + ex.getMessage()); } } else { - Assert.fail("Expected Exception: " + klass.getSimpleName() + " got: " + ex.getClass().getSimpleName()); + fail("Expected Exception: " + klass.getSimpleName() + " got: " + ex.getClass().getSimpleName()); } } else { throw ex; diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHFSTestCase.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHFSTestCase.java index b001c1cb98..f4996de542 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHFSTestCase.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHFSTestCase.java @@ -18,19 +18,9
@@ package org.apache.hadoop.test; -import junit.framework.Assert; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.util.Time; -import org.junit.Test; -import org.mortbay.jetty.Server; -import org.mortbay.jetty.servlet.Context; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; -import javax.servlet.ServletException; -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; @@ -39,6 +29,19 @@ import java.net.HttpURLConnection; import java.net.URL; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.Time; +import org.junit.Test; +import org.mortbay.jetty.Server; +import org.mortbay.jetty.servlet.Context; + public class TestHFSTestCase extends HFSTestCase { @Test(expected = IllegalStateException.class) @@ -69,7 +72,7 @@ public void testHdfsNoAnnotation2() throws Exception { @Test @TestDir public void testDirAnnotation() throws Exception { - Assert.assertNotNull(TestDirHelper.getTestDir()); + assertNotNull(TestDirHelper.getTestDir()); } @Test @@ -81,8 +84,8 @@ public boolean evaluate() throws Exception { } }); long end = Time.now(); - Assert.assertEquals(waited, 0, 50); - Assert.assertEquals(end - start - waited, 0, 50); + assertEquals(waited, 0, 50); + assertEquals(end - start - waited, 0, 50); } @Test @@ -95,8 +98,8 @@ public boolean evaluate() throws Exception { } }); long end = Time.now(); - Assert.assertEquals(waited, -1); - Assert.assertEquals(end - start, 200, 50); + assertEquals(waited, -1); + assertEquals(end - start, 200, 50); } @Test @@ -109,8 +112,8 @@ public boolean evaluate() throws Exception { } }); long end = Time.now(); - Assert.assertEquals(waited, -1); - Assert.assertEquals(end - start, 200 * getWaitForRatio(), 50 * getWaitForRatio()); + assertEquals(waited, -1); + assertEquals(end - start, 200 * getWaitForRatio(), 50 * getWaitForRatio()); } @Test @@ -119,7 +122,7 @@ public void sleepRatio1() { long start = Time.now(); sleep(100); long end = Time.now(); - Assert.assertEquals(end - start, 100, 50); + assertEquals(end - start, 100, 50); } @Test @@ -128,7 +131,7 @@ public void sleepRatio2() { long start = Time.now(); sleep(100); long end = Time.now(); - Assert.assertEquals(end - start, 100 * getWaitForRatio(), 50 * getWaitForRatio()); + assertEquals(end - start, 100 * getWaitForRatio(), 50 * getWaitForRatio()); } @Test @@ -141,8 +144,8 @@ public void testHadoopFileSystem() throws Exception { os.write(new byte[]{1}); os.close(); InputStream is = fs.open(new Path(TestHdfsHelper.getHdfsTestDir(), "foo")); - Assert.assertEquals(is.read(), 1); - Assert.assertEquals(is.read(), -1); + assertEquals(is.read(), 1); + assertEquals(is.read(), -1); is.close(); } finally { fs.close(); @@ -167,9 +170,9 @@ public void testJetty() throws Exception { server.start(); URL url = new URL(TestJettyHelper.getJettyURL(), "/bar"); HttpURLConnection conn = (HttpURLConnection) url.openConnection(); - Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK); + assertEquals(conn.getResponseCode(), 
HttpURLConnection.HTTP_OK); BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream())); - Assert.assertEquals(reader.readLine(), "foo"); + assertEquals(reader.readLine(), "foo"); reader.close(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHTestCase.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHTestCase.java index f6af2a6c8e..10c798f3fa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHTestCase.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHTestCase.java @@ -18,23 +18,25 @@ package org.apache.hadoop.test; -import junit.framework.Assert; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; -import org.apache.hadoop.util.Time; -import org.junit.Test; -import org.mortbay.jetty.Server; -import org.mortbay.jetty.servlet.Context; - -import javax.servlet.ServletException; -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; import java.net.HttpURLConnection; import java.net.URL; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.hadoop.util.Time; +import org.junit.Test; +import org.mortbay.jetty.Server; +import org.mortbay.jetty.servlet.Context; + public class TestHTestCase extends HTestCase { @Test(expected = IllegalStateException.class) @@ -55,7 +57,7 @@ public void testJettyNoAnnotation2() throws Exception { @Test @TestDir public void testDirAnnotation() throws Exception { - Assert.assertNotNull(TestDirHelper.getTestDir()); + assertNotNull(TestDirHelper.getTestDir()); } @Test @@ -67,8 +69,8 @@ public boolean evaluate() throws Exception { } }); long end = Time.now(); - Assert.assertEquals(waited, 0, 50); - Assert.assertEquals(end - start - waited, 0, 50); + assertEquals(waited, 0, 50); + assertEquals(end - start - waited, 0, 50); } @Test @@ -81,8 +83,8 @@ public boolean evaluate() throws Exception { } }); long end = Time.now(); - Assert.assertEquals(waited, -1); - Assert.assertEquals(end - start, 200, 50); + assertEquals(waited, -1); + assertEquals(end - start, 200, 50); } @Test @@ -95,8 +97,8 @@ public boolean evaluate() throws Exception { } }); long end = Time.now(); - Assert.assertEquals(waited, -1); - Assert.assertEquals(end - start, 200 * getWaitForRatio(), 50 * getWaitForRatio()); + assertEquals(waited, -1); + assertEquals(end - start, 200 * getWaitForRatio(), 50 * getWaitForRatio()); } @Test @@ -105,7 +107,7 @@ public void sleepRatio1() { long start = Time.now(); sleep(100); long end = Time.now(); - Assert.assertEquals(end - start, 100, 50); + assertEquals(end - start, 100, 50); } @Test @@ -114,7 +116,7 @@ public void sleepRatio2() { long start = Time.now(); sleep(100); long end = Time.now(); - Assert.assertEquals(end - start, 100 * getWaitForRatio(), 50 * getWaitForRatio()); + assertEquals(end - start, 100 * getWaitForRatio(), 50 * getWaitForRatio()); } public static class MyServlet extends HttpServlet { @@ -135,9 +137,9 @@ public void testJetty() throws Exception { server.start(); URL url = new URL(TestJettyHelper.getJettyURL(), "/bar"); HttpURLConnection conn = (HttpURLConnection) url.openConnection(); - 
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK); + assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK); BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream())); - Assert.assertEquals(reader.readLine(), "foo"); + assertEquals(reader.readLine(), "foo"); reader.close(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java index cd030695f9..2afd7d35a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.test; +import java.io.File; +import java.util.concurrent.atomic.AtomicInteger; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -26,9 +29,6 @@ import org.junit.runners.model.FrameworkMethod; import org.junit.runners.model.Statement; -import java.io.File; -import java.util.concurrent.atomic.AtomicInteger; - public class TestHdfsHelper extends TestDirHelper { @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJettyHelper.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJettyHelper.java index 1a4f5b215e..95cb10463c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJettyHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJettyHelper.java @@ -17,17 +17,17 @@ */ package org.apache.hadoop.test; +import java.net.InetAddress; +import java.net.MalformedURLException; +import java.net.ServerSocket; +import java.net.URL; + import org.junit.Test; import org.junit.rules.MethodRule; import org.junit.runners.model.FrameworkMethod; import org.junit.runners.model.Statement; import org.mortbay.jetty.Server; -import java.net.InetAddress; -import java.net.MalformedURLException; -import java.net.ServerSocket; -import java.net.URL; - public class TestJettyHelper implements MethodRule { @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/hdfs/TestRaidDfs.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/hdfs/TestRaidDfs.java index 36aab52ee5..ae7d0293a7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/hdfs/TestRaidDfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/hdfs/TestRaidDfs.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.BufferedReader; import java.io.File; import java.io.FileNotFoundException; @@ -28,8 +31,6 @@ import java.util.regex.Pattern; import java.util.zip.CRC32; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -44,8 +45,9 @@ import org.apache.hadoop.raid.RaidUtils; import org.apache.hadoop.raid.protocol.PolicyInfo.ErasureCodeType; import org.apache.hadoop.util.StringUtils; +import org.junit.Test; -public class TestRaidDfs extends TestCase { +public class TestRaidDfs { final static String TEST_DIR = new File(System.getProperty("test.build.data", 
"target/test-data")).getAbsolutePath(); final static String LOG_DIR = "target/raidlog"; @@ -195,6 +197,7 @@ private void corruptBlockAndValidate(Path srcFile, Path destPath, * Create a file, corrupt several blocks in it and ensure that the file can be * read through DistributedRaidFileSystem by ReedSolomon coding. */ + @Test public void testRaidDfsRs() throws Exception { LOG.info("Test testRaidDfs started."); @@ -224,6 +227,7 @@ public void testRaidDfsRs() throws Exception { /** * Test DistributedRaidFileSystem.readFully() */ + @Test public void testReadFully() throws Exception { code = ErasureCodeType.XOR; stripeLength = 3; @@ -268,6 +272,7 @@ public void testReadFully() throws Exception { * Test that access time and mtime of a source file do not change after * raiding. */ + @Test public void testAccessTime() throws Exception { LOG.info("Test testAccessTime started."); @@ -300,6 +305,7 @@ public void testAccessTime() throws Exception { * Create a file, corrupt a block in it and ensure that the file can be * read through DistributedRaidFileSystem by XOR code. */ + @Test public void testRaidDfsXor() throws Exception { LOG.info("Test testRaidDfs started."); diff --git a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixer.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixer.java index 831311a1cc..960d11e30d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixer.java @@ -17,6 +17,11 @@ */ package org.apache.hadoop.raid; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + import java.io.File; import java.io.FileWriter; import java.io.IOException; @@ -26,32 +31,27 @@ import java.util.Random; import java.util.zip.CRC32; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.junit.Test; -import static org.junit.Assert.*; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - -import org.apache.hadoop.util.JarFinder; -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Time; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.RaidDFSUtil; +import org.apache.hadoop.hdfs.TestRaidDfs; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobContext; import org.apache.hadoop.mapred.MiniMRCluster; -import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.RaidDFSUtil; -import org.apache.hadoop.hdfs.TestRaidDfs; -import org.apache.hadoop.raid.RaidNode; -import org.apache.hadoop.raid.RaidUtils; +import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; +import org.apache.hadoop.util.JarFinder; +import org.apache.hadoop.util.StringUtils; +import 
org.apache.hadoop.util.Time; +import org.junit.Test; public class TestBlockFixer { diff --git a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerBlockFixDist.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerBlockFixDist.java index 40ee9516be..6eeba5be4f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerBlockFixDist.java +++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerBlockFixDist.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.raid; import org.junit.Test; -import static org.junit.Assert.*; public class TestBlockFixerBlockFixDist extends TestBlockFixer { @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerDistConcurrency.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerDistConcurrency.java index 46fab09296..c425772274 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerDistConcurrency.java +++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerDistConcurrency.java @@ -17,19 +17,19 @@ */ package org.apache.hadoop.raid; -import org.junit.Test; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Time; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.RaidDFSUtil; import org.apache.hadoop.hdfs.TestRaidDfs; -import org.apache.hadoop.raid.RaidNode; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Time; +import org.junit.Test; public class TestBlockFixerDistConcurrency extends TestBlockFixer { /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerGeneratedBlockDist.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerGeneratedBlockDist.java index c2f42b936d..bb061a004b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerGeneratedBlockDist.java +++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerGeneratedBlockDist.java @@ -18,7 +18,6 @@ package org.apache.hadoop.raid; import org.junit.Test; -import static org.junit.Assert.*; public class TestBlockFixerGeneratedBlockDist extends TestBlockFixer { /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerParityBlockFixDist.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerParityBlockFixDist.java index a51bc0abc4..91d2ae54c2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerParityBlockFixDist.java +++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerParityBlockFixDist.java @@ -18,7 +18,6 @@ package org.apache.hadoop.raid; import org.junit.Test; -import static org.junit.Assert.*; public class TestBlockFixerParityBlockFixDist extends TestBlockFixer { @Test
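The thin TestBlockFixer* subclasses above work because JUnit 4 discovers @Test methods declared on superclasses: every inherited case re-runs against each subclass's configuration, so the subclasses only need to add or override what differs. A hedged sketch of the idea, with hypothetical class names:

import static org.junit.Assert.assertTrue;

import org.junit.Test;

public class BaseFixerExampleTest {

  // Subclasses override this to change the fixture under test.
  protected String mode() {
    return "local";
  }

  @Test
  public void sharedCase() {
    // Runs once for this class and again for every subclass.
    assertTrue(mode().length() > 0);
  }
}

class DistFixerExampleTest extends BaseFixerExampleTest {
  @Override
  protected String mode() {
    return "dist"; // the inherited sharedCase() re-runs with this value
  }
}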
diff --git a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestDirectoryTraversal.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestDirectoryTraversal.java index 111907788d..89cb6334fe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestDirectoryTraversal.java +++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestDirectoryTraversal.java @@ -17,27 +17,29 @@ */ package org.apache.hadoop.raid; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.IOException; import java.util.LinkedList; import java.util.List; -import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.mapred.Reporter; -import org.apache.hadoop.util.Time; - import org.apache.hadoop.raid.protocol.PolicyInfo; +import org.apache.hadoop.util.Time; +import org.junit.Test; -public class TestDirectoryTraversal extends TestCase { +public class TestDirectoryTraversal { final static Log LOG = LogFactory.getLog( "org.apache.hadoop.raid.TestDirectoryTraversal"); final static String TEST_DIR = new File(System.getProperty("test.build.data", @@ -50,6 +52,7 @@ public class TestDirectoryTraversal extends TestCase { /** * Test basic enumeration. */ + @Test public void testEnumeration() throws IOException { mySetup(); @@ -91,6 +94,7 @@ public void testEnumeration() throws IOException { } } + @Test public void testSuspension() throws IOException { LOG.info("Starting testSuspension"); mySetup(); @@ -128,6 +132,7 @@ public void testSuspension() throws IOException { } } + @Test public void testFileFilter() throws IOException { mySetup(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestErasureCodes.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestErasureCodes.java index 6069836781..d5e4b8e9fd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestErasureCodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestErasureCodes.java @@ -17,19 +17,22 @@ */ package org.apache.hadoop.raid; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.util.HashSet; import java.util.Random; import java.util.Set; import org.apache.hadoop.util.Time; +import org.junit.Test; -import junit.framework.TestCase; - -public class TestErasureCodes extends TestCase { +public class TestErasureCodes { final int TEST_CODES = 100; final int TEST_TIMES = 1000; final Random RAND = new Random(); + @Test public void testEncodeDecode() { for (int n = 0; n < TEST_CODES; n++) { int stripeSize = RAND.nextInt(99) + 1; // 1, 2, 3, ...
100 @@ -67,6 +70,7 @@ public void testEncodeDecode() { } } + @Test public void testRSPerformance() { int stripeSize = 10; int paritySize = 4; @@ -131,6 +135,7 @@ public void testRSPerformance() { assertTrue("Decode failed", java.util.Arrays.equals(copy, message[0])); } + @Test public void testXorPerformance() { java.util.Random RAND = new java.util.Random(); int stripeSize = 10; @@ -171,6 +176,7 @@ public void testXorPerformance() { assertTrue("Decode failed", java.util.Arrays.equals(copy, message[0])); } + @Test public void testComputeErrorLocations() { for (int i = 0; i < TEST_TIMES; ++i) { verifyErrorLocations(10, 4, 1); diff --git a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestGaloisField.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestGaloisField.java index 576aa58b15..523a79e801 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestGaloisField.java +++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestGaloisField.java @@ -17,13 +17,15 @@ */ package org.apache.hadoop.raid; +import static org.junit.Assert.assertTrue; + +import java.util.HashSet; import java.util.Random; import java.util.Set; -import java.util.HashSet; -import junit.framework.TestCase; +import org.junit.Test; -public class TestGaloisField extends TestCase { +public class TestGaloisField { final int TEST_TIMES = 10000; final Random RAND = new Random(); @@ -40,6 +42,7 @@ private int[] randGFPoly(int len) { return result; } + @Test public void testGetInstance() { GaloisField gf1 = GaloisField.getInstance(256, 285); GaloisField gf2 = GaloisField.getInstance(); @@ -52,6 +55,7 @@ public void testGetInstance() { assertTrue(gf5 == gf6); } + @Test public void testDistributivity() { for (int i = 0; i < TEST_TIMES; i++) { int a = RAND.nextInt(GF.getFieldSize()); @@ -64,6 +68,7 @@ public void testDistributivity() { } } + @Test public void testDevision() { for (int i = 0; i < TEST_TIMES; i++) { int a = RAND.nextInt(GF.getFieldSize()); @@ -77,6 +82,7 @@ public void testDevision() { } } + @Test public void testPower() { for (int i = 0; i < TEST_TIMES; i++) { int a = randGF(); @@ -90,6 +96,7 @@ public void testPower() { } } + @Test public void testPolynomialDistributivity() { final int TEST_LEN = 15; for (int i = 0; i < TEST_TIMES; i++) { @@ -103,6 +110,7 @@ public void testPolynomialDistributivity() { } } + @Test public void testSubstitute() { final int TEST_LEN = 15; for (int i = 0; i < TEST_TIMES; i++) { @@ -121,6 +129,7 @@ public void testSubstitute() { } } + @Test public void testSolveVandermondeSystem() { final int TEST_LEN = 15; for (int i = 0; i < TEST_TIMES; i++) { @@ -151,6 +160,7 @@ public void testSolveVandermondeSystem() { } } + @Test public void testRemainder() { final int TEST_LEN = 15; for (int i = 0; i < TEST_TIMES; i++) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestHarIndexParser.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestHarIndexParser.java index 8ece5a67d2..791280ce88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestHarIndexParser.java +++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestHarIndexParser.java @@ -17,25 +17,30 @@ */ package org.apache.hadoop.raid; +import static org.junit.Assert.assertEquals; + import java.io.File; import java.io.FileInputStream; -import java.io.FileOutputStream; import 
java.io.FileNotFoundException; +import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStreamWriter; import java.io.UnsupportedEncodingException; import java.nio.charset.Charset; -import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; -public class TestHarIndexParser extends TestCase { +public class TestHarIndexParser { final static Log LOG = LogFactory.getLog(TestHarIndexParser.class); File indexFile = null; - protected void setUp() throws FileNotFoundException, IOException { + @Before + public void setUp() throws FileNotFoundException, IOException { LOG.info("TestHarIndexParser.setUp()"); indexFile = File.createTempFile("harindex", ".tmp"); indexFile.deleteOnExit(); @@ -51,12 +56,14 @@ protected void setUp() throws FileNotFoundException, IOException { out.close(); } - protected void tearDown() { + @After + public void tearDown() { LOG.info("TestHarIndexParser.tearDown()"); if (indexFile != null) indexFile.delete(); } + @Test public void testHarIndexParser() throws UnsupportedEncodingException, IOException { LOG.info("testHarIndexParser started."); diff --git a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidFilter.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidFilter.java index 4b99780903..e22339d409 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidFilter.java +++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidFilter.java @@ -17,25 +17,25 @@ */ package org.apache.hadoop.raid; -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.util.ArrayList; -import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.raid.protocol.PolicyInfo; import org.apache.hadoop.util.Time; +import org.junit.Test; -public class TestRaidFilter extends TestCase { +public class TestRaidFilter { final static String TEST_DIR = new File(System.getProperty("test.build.data", "target/test-data")).getAbsolutePath(); final static Log LOG = @@ -59,6 +59,7 @@ private void myTearDown() throws Exception { if (dfs != null) { dfs.shutdown(); } } + @Test public void testLayeredPolicies() throws Exception { mySetup(); Path src1 = new Path("/user/foo"); diff --git a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidHar.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidHar.java index d3aeab7bff..9df30b39a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidHar.java +++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidHar.java @@ -17,31 +17,32 @@ */ package org.apache.hadoop.raid; +import static org.junit.Assert.assertEquals; + import
java.io.File; -import java.io.FileWriter; import java.io.FileNotFoundException; +import java.io.FileWriter; import java.util.Random; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; -import org.apache.log4j.Level; - -import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.MiniMRCluster; +import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; +import org.apache.hadoop.util.StringUtils; +import org.apache.log4j.Level; +import org.junit.Test; /** * If a file gets deleted, then verify that the parity file gets deleted too. */ -public class TestRaidHar extends TestCase { +public class TestRaidHar { final static String TEST_DIR = new File(System.getProperty("test.build.data", "target/test-data")).getAbsolutePath(); final static String CONFIG_FILE = new File(TEST_DIR, @@ -182,6 +183,7 @@ private void stopClusters() throws Exception { * Test that parity files that do not have an associated master file * get deleted. */ + @Test public void testRaidHar() throws Exception { LOG.info("Test testRaidHar started."); diff --git a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidNode.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidNode.java index 79dd945fbf..aae0bcbab0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidNode.java @@ -17,26 +17,26 @@ */ package org.apache.hadoop.raid; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + import java.io.File; -import java.io.FileWriter; import java.io.FileNotFoundException; +import java.io.FileWriter; import java.io.IOException; import java.util.List; import java.util.Random; import java.util.zip.CRC32; -import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Time; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobContext; @@ -45,14 +45,16 @@ import org.apache.hadoop.raid.protocol.PolicyInfo; import org.apache.hadoop.raid.protocol.PolicyList; import org.apache.hadoop.util.JarFinder; -import org.apache.hadoop.raid.protocol.PolicyInfo.ErasureCodeType; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Time; +import org.junit.Test; /** * Test the generation of parity blocks for files with different block * sizes. 
Also test that a data block can be regenerated from a raid stripe * using the parity block */ -public class TestRaidNode extends TestCase { +public class TestRaidNode { final static String TEST_DIR = new File(System.getProperty("test.build.data", "target/test-data")).getAbsolutePath(); public static final String DistRaid_JAR = JarFinder.getJar(DistRaid.class); @@ -258,6 +260,7 @@ private void stopClusters() throws Exception { /** * Test to run a filter */ + @Test public void testPathFilter() throws Exception { LOG.info("Test testPathFilter started."); @@ -513,6 +516,7 @@ static void createTestFiles(FileSystem fileSys, String path, String destpath, in /** * Test dist Raid */ + @Test public void testDistRaid() throws Exception { LOG.info("Test testDistRaid started."); long targetReplication = 2; @@ -664,6 +668,7 @@ private void validateFile(FileSystem fileSys, Path name1, Path name2, long crc) } } + @Test public void testSuspendTraversal() throws Exception { LOG.info("Test testSuspendTraversal started."); long targetReplication = 2; diff --git a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidPurge.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidPurge.java index ce2c5b6a0c..5010dcf9fc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidPurge.java +++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidPurge.java @@ -17,48 +17,37 @@ */ package org.apache.hadoop.raid; -import java.io.File; -import java.io.FileWriter; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.PrintWriter; -import java.util.ArrayList; -import java.util.Collection; -import java.util.GregorianCalendar; -import java.util.Iterator; -import java.util.List; -import java.util.Properties; -import java.util.Random; -import java.util.zip.CRC32; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileWriter; +import java.util.Random; -import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; -import org.apache.log4j.Level; - -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Time; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.mapred.MiniMRCluster; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.raid.protocol.PolicyInfo; -import org.apache.hadoop.raid.protocol.PolicyList; import org.apache.hadoop.hdfs.TestRaidDfs; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.MiniMRCluster; import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; import org.apache.hadoop.raid.protocol.PolicyInfo; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Time; +import org.apache.log4j.Level; +import org.junit.Test; /** * If a file gets deleted, then verify that the parity file gets deleted too. 
*/ -public class TestRaidPurge extends TestCase { +public class TestRaidPurge { final static String TEST_DIR = new File(System.getProperty("test.build.data", "target/test-data")).getAbsolutePath(); final static String CONFIG_FILE = new File(TEST_DIR, @@ -206,6 +195,7 @@ private void stopClusters() throws Exception { * Test that parity files that do not have an associated master file * get deleted. */ + @Test public void testPurge() throws Exception { LOG.info("Test testPurge started."); @@ -312,6 +302,7 @@ private void doTestPurge(int iter, long targetReplication, * Create a file, wait for parity file to get HARed. Then modify the file, * wait for the HAR to get purged. */ + @Test public void testPurgeHar() throws Exception { LOG.info("testPurgeHar started"); int harDelay = 0; @@ -381,6 +372,7 @@ public void testPurgeHar() throws Exception { * Create parity file, delete original file's directory and then validate that * parity directory is automatically deleted. */ + @Test public void testPurgeDirectory() throws Exception { long stripeLength = 5; long blockSize = 8192; @@ -433,6 +425,7 @@ public void testPurgeDirectory() throws Exception { /** * Test that an XOR parity file is removed when a RS parity file is detected. */ + @Test public void testPurgePreference() throws Exception { createClusters(true); Path dir = new Path("/user/test/raidtest/"); diff --git a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidShell.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidShell.java index 3ca6a32ccd..1245e90b24 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidShell.java +++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidShell.java @@ -17,34 +17,35 @@ */ package org.apache.hadoop.raid; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.FileWriter; import java.io.IOException; import java.util.Random; import java.util.zip.CRC32; -import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.RaidDFSUtil; +import org.apache.hadoop.hdfs.TestRaidDfs; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.TestRaidDfs; -import org.apache.hadoop.hdfs.RaidDFSUtil; -import org.apache.hadoop.raid.RaidNode; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.ToolRunner; +import org.junit.Test; -public class TestRaidShell extends TestCase { +public class TestRaidShell { final static Log LOG = LogFactory.getLog( "org.apache.hadoop.raid.TestRaidShell"); final static String TEST_DIR = new File(System.getProperty("test.build.data", @@ -65,6 +66,7 @@ public class TestRaidShell extends TestCase 
{ * Create a file with three stripes, corrupt a block each in two stripes, * and wait for the file to be fixed. */ + @Test public void testBlockFix() throws Exception { LOG.info("Test testBlockFix started."); long blockSize = 8192L; diff --git a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidShellFsck.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidShellFsck.java index f7c46a9f08..bf7cdc05eb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidShellFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidShellFsck.java @@ -17,34 +17,31 @@ */ package org.apache.hadoop.raid; +import static org.junit.Assert.assertTrue; + import java.io.File; -import java.io.FileWriter; import java.io.FileNotFoundException; +import java.io.FileWriter; import java.io.IOException; import java.util.Random; -import org.junit.Test; -import org.junit.After; -import static org.junit.Assert.assertTrue; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - -import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.RaidDFSUtil; +import org.apache.hadoop.hdfs.TestRaidDfs; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.TestRaidDfs; -import org.apache.hadoop.hdfs.RaidDFSUtil; -import org.apache.hadoop.raid.RaidNode; -import org.apache.hadoop.raid.HarIndex; +import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.ToolRunner; +import org.junit.After; +import org.junit.Test; public class TestRaidShellFsck { diff --git a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestReedSolomonDecoder.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestReedSolomonDecoder.java index 5f47cee293..d51617569c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestReedSolomonDecoder.java +++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestReedSolomonDecoder.java @@ -18,27 +18,29 @@ package org.apache.hadoop.raid; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.IOException; -import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.RaidDFSUtil; import org.apache.hadoop.hdfs.TestRaidDfs; +import 
org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.mapred.Reporter; +import org.junit.Test; -public class TestReedSolomonDecoder extends TestCase { +public class TestReedSolomonDecoder { final static Log LOG = LogFactory.getLog( "org.apache.hadoop.raid.TestReedSolomonDecoder"); final static String TEST_DIR = new File(System.getProperty("test.build.data", @@ -49,6 +51,7 @@ public class TestReedSolomonDecoder extends TestCase { MiniDFSCluster dfs = null; FileSystem fileSys = null; + @Test public void testDecoder() throws Exception { mySetup(); int stripeSize = 10; diff --git a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestReedSolomonEncoder.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestReedSolomonEncoder.java index bd1201413a..86111d05e0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestReedSolomonEncoder.java +++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestReedSolomonEncoder.java @@ -18,34 +18,23 @@ package org.apache.hadoop.raid; +import static org.junit.Assert.assertEquals; + import java.io.File; -import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.fs.BlockLocation; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FilterFileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.ClientProtocol; -import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.DistributedRaidFileSystem; import org.apache.hadoop.hdfs.TestRaidDfs; import org.apache.hadoop.mapred.Reporter; -import org.apache.hadoop.raid.RaidNode; +import org.junit.Test; -public class TestReedSolomonEncoder extends TestCase { +public class TestReedSolomonEncoder { final static Log LOG = LogFactory.getLog( "org.apache.hadoop.raid.TestReedSolomonEncoder"); final static String TEST_DIR = new File(System.getProperty("test.build.data", @@ -57,6 +46,7 @@ public class TestReedSolomonEncoder extends TestCase { MiniDFSCluster dfs = null; FileSystem fileSys = null; + @Test public void testEncoder() throws Exception { mySetup(); int stripeSize = 10; diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index c33bcd5ac2..0b8e2b2692 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -109,6 +109,8 @@ Trunk (unreleased changes) HDFS-3630 Modify TestPersistBlocks to use both flush and hflush (sanjay) + HDFS-3583. Convert remaining tests to Junit4. 
(Andrew Wang via atm) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java index ece2261f7a..89932cc67c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java @@ -17,7 +17,11 @@ */ package org.apache.hadoop.cli; -import org.apache.hadoop.cli.util.*; +import org.apache.hadoop.cli.util.CLICommandDFSAdmin; +import org.apache.hadoop.cli.util.CLICommandTypes; +import org.apache.hadoop.cli.util.CLITestCmd; +import org.apache.hadoop.cli.util.CommandExecutor; +import org.apache.hadoop.cli.util.FSCmdExecutor; import org.apache.hadoop.hdfs.tools.DFSAdmin; public class CLITestCmdDFS extends CLITestCmd { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java index 0b48665d8a..ebe7b5d1f6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java @@ -18,6 +18,8 @@ package org.apache.hadoop.cli; +import static org.junit.Assert.assertTrue; + import org.apache.hadoop.cli.util.CLICommand; import org.apache.hadoop.cli.util.CommandExecutor.Result; import org.apache.hadoop.fs.FileSystem; @@ -27,7 +29,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.security.authorize.PolicyProvider; import org.junit.After; -import static org.junit.Assert.assertTrue; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java index 1d90050a7e..88610fd31c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java @@ -17,19 +17,23 @@ */ package org.apache.hadoop.fs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.regex.Pattern; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; -import junit.framework.TestCase; +public class TestGlobPaths { -public class TestGlobPaths extends TestCase { - static class RegexPathFilter implements PathFilter { - + private final String regex; public RegexPathFilter(String regex) { this.regex = regex; @@ -41,15 +45,15 @@ public boolean accept(Path path) { } } - + static private MiniDFSCluster dfsCluster; static private FileSystem fs; static final private int NUM_OF_PATHS = 4; static final String USER_DIR = "/user/"+System.getProperty("user.name"); private Path[] path = new Path[NUM_OF_PATHS]; - - @Override - protected void setUp() throws Exception { + + @Before + public void setUp() throws Exception { try { Configuration conf = new HdfsConfiguration(); dfsCluster = new MiniDFSCluster.Builder(conf).build(); @@ -59,13 +63,14 @@ protected void setUp() throws Exception { } } - @Override - protected void tearDown() throws Exception { + @After + public void tearDown() throws 
Exception { if(dfsCluster!=null) { dfsCluster.shutdown(); } } + @Test public void testPathFilter() throws IOException { try { String[] files = new String[] { USER_DIR + "/a", USER_DIR + "/a/b" }; @@ -78,6 +83,7 @@ public void testPathFilter() throws IOException { } } + @Test public void testPathFilterWithFixedLastComponent() throws IOException { try { String[] files = new String[] { USER_DIR + "/a", USER_DIR + "/a/b", @@ -91,6 +97,7 @@ public void testPathFilterWithFixedLastComponent() throws IOException { } } + @Test public void testGlob() throws Exception { //pTestEscape(); // need to wait until HADOOP-1995 is fixed pTestJavaRegexSpecialChars(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java index 34410ed02f..a4f2d5fe3f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java @@ -18,6 +18,9 @@ package org.apache.hadoop.fs; +import static org.apache.hadoop.fs.FileContextTestHelper.exists; +import static org.apache.hadoop.fs.FileContextTestHelper.getTestRootPath; + import java.io.IOException; import java.net.URISyntaxException; @@ -27,8 +30,8 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.UserGroupInformation; import org.junit.After; import org.junit.AfterClass; @@ -37,8 +40,6 @@ import org.junit.BeforeClass; import org.junit.Test; -import static org.apache.hadoop.fs.FileContextTestHelper.*; - public class TestHDFSFileContextMainOperations extends FileContextMainOperationsBaseTest { private static MiniDFSCluster cluster; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java index 1d5def6b48..516ff1a3d3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; -import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java index bf22c7187f..d28736cffa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.fs; +import static org.junit.Assert.assertEquals; +import static 
org.junit.Assert.assertNotNull; + import java.io.File; import java.io.IOException; import java.io.InputStream; @@ -25,19 +28,15 @@ import java.net.URISyntaxException; import java.net.URL; -import junit.framework.TestCase; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FsUrlStreamHandlerFactory; -import org.apache.hadoop.fs.Path; +import org.junit.Test; /** * Test of the URL stream handler factory. */ -public class TestUrlStreamHandler extends TestCase { +public class TestUrlStreamHandler { /** * Test opening and reading from an InputStream through a hdfs:// URL. @@ -47,6 +46,7 @@ public class TestUrlStreamHandler extends TestCase { * * @throws IOException */ + @Test public void testDfsUrls() throws IOException { Configuration conf = new HdfsConfiguration(); @@ -105,6 +105,7 @@ public void testDfsUrls() throws IOException { * @throws IOException * @throws URISyntaxException */ + @Test public void testFileUrls() throws IOException, URISyntaxException { // URLStreamHandler is already set in JVM by testDfsUrls() Configuration conf = new HdfsConfiguration(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java index d6890f5c8f..e9e14ce8b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.fs.loadGenerator; +import static org.junit.Assert.assertEquals; + import java.io.BufferedReader; import java.io.File; import java.io.FileReader; @@ -27,9 +29,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; - -import static org.junit.Assert.*; - import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java index ca4862bd01..359a47ad95 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java @@ -17,9 +17,12 @@ */ package org.apache.hadoop.fs.permission; -import java.io.IOException; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; -import junit.framework.TestCase; +import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; @@ -32,8 +35,9 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; +import org.junit.Test; -public class TestStickyBit extends TestCase { +public class TestStickyBit { static UserGroupInformation user1 = UserGroupInformation.createUserForTesting("theDoctor", new String[] {"tardis"}); @@ -158,6 +162,7 @@ private void confirmSettingAndGetting(FileSystem 
hdfs, Path baseDir) assertFalse(hdfs.getFileStatus(f).getPermission().getStickyBit()); } + @Test public void testGeneralSBBehavior() throws IOException, InterruptedException { MiniDFSCluster cluster = null; try { @@ -195,6 +200,7 @@ public void testGeneralSBBehavior() throws IOException, InterruptedException { * Test that one user can't rename/move another user's file when the sticky * bit is set. */ + @Test public void testMovingFiles() throws IOException, InterruptedException { MiniDFSCluster cluster = null; @@ -243,6 +249,7 @@ public void testMovingFiles() throws IOException, InterruptedException { * the sticky bit back on re-start, and that no extra sticky bits appear after * re-start. */ + @Test public void testStickyBitPersistence() throws IOException { MiniDFSCluster cluster = null; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java index f3a81ac9c0..cd23ef0638 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java @@ -18,26 +18,6 @@ package org.apache.hadoop.fs.viewfs; -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; - -import javax.security.auth.login.LoginException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileSystemTestHelper; -import org.apache.hadoop.fs.FsConstants; -import org.apache.hadoop.fs.ContentSummary; -import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.io.DataInputBuffer; -import org.apache.hadoop.io.DataOutputBuffer; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.fs.FsServerDefaults; -import org.apache.hadoop.hdfs.DFSConfigKeys; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT; @@ -46,17 +26,29 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import javax.security.auth.login.LoginException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileSystemTestHelper; 
+import org.apache.hadoop.fs.FsConstants; +import org.apache.hadoop.fs.FsServerDefaults; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; -import static org.junit.Assert.*; /** * Tests for viewfs implementation of default fs level values. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java index 74c32d9c72..74d8bca614 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java @@ -23,6 +23,9 @@ * Since viewfs has overlaid ViewFsFileStatus, we ran into * serialization problems. This test tests the fix. */ +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; @@ -40,11 +43,9 @@ import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.security.UserGroupInformation; - import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; -import static org.junit.Assert.*; public class TestViewFsFileStatusHdfs { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java index ff27da44cb..b78d7075d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java @@ -17,13 +17,12 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; + import java.io.IOException; import java.io.OutputStream; import java.util.Random; -import junit.framework.Assert; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -121,16 +120,16 @@ public static void check(FileSystem fs, Path p, long length) throws IOException FSDataInputStream in = fs.open(p); if (in.getWrappedStream() instanceof DFSInputStream) { long len = ((DFSInputStream)in.getWrappedStream()).getFileLength(); - TestCase.assertEquals(length, len); + assertEquals(length, len); } else { - TestCase.assertEquals(length, status.getLen()); + assertEquals(length, status.getLen()); } for(i++; i < length; i++) { - TestCase.assertEquals((byte)i, (byte)in.read()); + assertEquals((byte)i, (byte)in.read()); } i = -(int)length; - TestCase.assertEquals(-1, in.read()); //EOF + assertEquals(-1, in.read()); //EOF in.close(); } catch(IOException ioe) { throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe); @@ -175,7 +174,7 @@ public static void checkFullFile(FileSystem fs, Path name, int len, private static void checkData(final byte[] actual, int from, final byte[] expected, String message) { for (int idx = 0; idx < actual.length; idx++) { - Assert.assertEquals(message+" byte "+(from+idx)+" differs. 
expected "+ expected[from+idx]+" actual "+actual[idx], expected[from+idx], actual[idx]); actual[idx] = 0; @@ -189,7 +188,7 @@ public static void testAppend(FileSystem fs, Path p) throws IOException { final FSDataOutputStream out = fs.create(p, (short)1); out.write(bytes); out.close(); - Assert.assertEquals(bytes.length, fs.getFileStatus(p).getLen()); + assertEquals(bytes.length, fs.getFileStatus(p).getLen()); } for(int i = 2; i < 500; i++) { @@ -197,7 +196,7 @@ public static void testAppend(FileSystem fs, Path p) throws IOException { final FSDataOutputStream out = fs.append(p); out.write(bytes); out.close(); - Assert.assertEquals(i*bytes.length, fs.getFileStatus(p).getLen()); + assertEquals(i*bytes.length, fs.getFileStatus(p).getLen()); } } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java index 0d47b9c5ba..b3dd55a9af 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java @@ -36,7 +36,6 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; - import org.apache.log4j.Level; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java index 80503e67ea..24cac94f6e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java @@ -18,25 +18,26 @@ package org.apache.hadoop.hdfs; -import java.net.Socket; -import java.net.InetSocketAddress; -import java.io.DataOutputStream; -import java.util.Random; -import java.util.List; -import java.io.IOException; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import java.io.DataOutputStream; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.util.List; +import java.util.Random; + +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.net.NetUtils; -import static org.junit.Assert.*; - /** * A helper class to setup the cluster, and get to BlockReader and DataNode for a block. 
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 39e6f26c94..b20baa9bd2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -55,7 +55,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem.Statistics; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.DFSTestUtil.Builder; import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.protocol.DatanodeID; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java index d26e2cb4e2..29306dc704 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs; +import java.io.IOException; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -27,8 +29,6 @@ import org.junit.BeforeClass; import org.junit.Test; -import java.io.IOException; - /** This is a comprehensive append test that tries * all combinations of file length and number of appended bytes * In each iteration, it creates a file of len1. Then reopen diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java index 582767c801..9590bc3cf9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java @@ -17,18 +17,20 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.fail; + import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.*; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; - -import static org.junit.Assert.*; import org.junit.After; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java index 117952a7e7..3f29932c1a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java @@ -17,27 +17,27 @@ */ package org.apache.hadoop.hdfs; -import java.util.ArrayList; +import static org.junit.Assert.assertEquals; -import junit.framework.TestCase; -import org.apache.hadoop.conf.Configuration; +import java.util.ArrayList; import 
org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.junit.Test; /** * This test ensures that the balancer bandwidth is dynamically adjusted * correctly. */ -public class TestBalancerBandwidth extends TestCase { +public class TestBalancerBandwidth { final static private Configuration conf = new Configuration(); final static private int NUM_OF_DATANODES = 2; final static private int DEFAULT_BANDWIDTH = 1024*1024; public static final Log LOG = LogFactory.getLog(TestBalancerBandwidth.class); + @Test public void testBalancerBandwidth() throws Exception { /* Set bandwidthPerSec to a low value of 1M bps. */ conf.setLong( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java index be6e741a0a..a885ff4b13 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java @@ -17,26 +17,24 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.IOException; -import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.BlockMissingException; +import org.junit.Test; -public class TestBlockMissingException extends TestCase { +public class TestBlockMissingException { final static Log LOG = LogFactory.getLog("org.apache.hadoop.hdfs.TestBlockMissing"); final static int NUM_DATANODES = 3; @@ -47,6 +45,7 @@ public class TestBlockMissingException extends TestCase { /** * Test DFS Raid */ + @Test public void testBlockMissingException() throws Exception { LOG.info("Test testBlockMissingException started."); long blockSize = 1024L; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java index e2547144fa..59dbb302c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java @@ -17,24 +17,26 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; + import java.io.IOException; import java.util.ArrayList; -import junit.framework.TestCase; - import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; +import org.junit.Test; /** * This class tests DatanodeDescriptor.getBlocksScheduled() at the * NameNode. This counter is supposed to keep track of blocks currently * scheduled to a datanode. */ -public class TestBlocksScheduledCounter extends TestCase { +public class TestBlocksScheduledCounter { + @Test public void testBlocksScheduledCounter() throws IOException { MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()) .build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java index ec2d41c06d..e7a1e14dda 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java @@ -18,21 +18,20 @@ package org.apache.hadoop.hdfs; -import java.util.List; - -import org.apache.hadoop.hdfs.DFSClient; -import org.apache.commons.logging.impl.Log4JLogger; -import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; -import org.apache.hadoop.fs.Path; -import org.apache.log4j.Level; - -import org.junit.Test; -import org.junit.AfterClass; -import org.junit.BeforeClass; +import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.never; + +import java.util.List; + +import org.apache.commons.logging.impl.Log4JLogger; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; +import org.apache.log4j.Level; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; public class TestClientBlockVerification { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java index a0b7b0d151..ee39cfe533 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java @@ -26,11 +26,9 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; -import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.io.IOUtils; - -import org.junit.Test; import org.junit.Assert; +import org.junit.Test; /** * This tests whether pipeline recovery related client protocol works correctly. 
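The hunks above all apply the same mechanical JUnit3-to-JUnit4 conversion: drop the junit.framework.TestCase superclass, annotate each test method with @Test, turn the overridden protected setUp()/tearDown() into public methods annotated @Before/@After, and statically import the needed assertions from org.junit.Assert. A minimal sketch of the resulting shape; the class ExampleMigratedTest and its fixture are illustrative and not part of the patch:

import static org.junit.Assert.assertEquals;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

// JUnit4 style: no TestCase superclass; lifecycle and test methods are
// discovered via annotations rather than inheritance and naming conventions.
public class ExampleMigratedTest {

  private StringBuilder fixture;

  @Before  // replaces JUnit3's overridden protected setUp(); must be public
  public void setUp() {
    fixture = new StringBuilder("abc");
  }

  @After   // replaces JUnit3's overridden protected tearDown(); must be public
  public void tearDown() {
    fixture = null;
  }

  @Test    // replaces JUnit3's test* naming convention
  public void testLength() {
    assertEquals(3, fixture.length());
  }
}

Because JUnit4 discovers tests by annotation, a method without @Test is silently skipped, which is why every converted test method in this patch gains the annotation even when its name still starts with test.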
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java index eb5e08880f..354a17af9b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java @@ -17,37 +17,33 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.spy; + +import java.io.IOException; import java.net.InetSocketAddress; import java.net.Socket; -import java.io.IOException; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.DFSClient; -import org.apache.hadoop.hdfs.DFSInputStream; -import org.apache.hadoop.hdfs.SocketCache; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.io.IOUtils; - import org.apache.hadoop.security.token.Token; -import org.junit.Test; import org.junit.AfterClass; import org.junit.BeforeClass; -import static org.junit.Assert.*; - +import org.junit.Test; import org.mockito.Matchers; import org.mockito.Mockito; -import org.mockito.stubbing.Answer; import org.mockito.invocation.InvocationOnMock; -import static org.mockito.Mockito.spy; +import org.mockito.stubbing.Answer; /** * This class tests the client connection caching in a single node diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java index 010d3ff9f6..15a26d3e43 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java @@ -18,21 +18,23 @@ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + import java.io.File; -import java.io.RandomAccessFile; import java.io.IOException; +import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.util.Random; -import org.junit.Test; -import static org.junit.Assert.*; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.io.IOUtils; +import org.junit.Test; /** * A JUnit test for corrupted file handling. 
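The AppendTestUtil hunks above show the companion change for utility classes that only use assertions: qualified junit.framework calls such as TestCase.assertEquals(...) and Assert.assertEquals(...) become bare calls resolved through static imports of org.junit.Assert. A small self-contained sketch of the two call styles; the class name AssertStyleExample and the asserted values are illustrative:

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

public class AssertStyleExample {
  public static void main(String[] args) {
    // Before: junit.framework.TestCase.assertEquals(4, 2 + 2);
    // After: the statically imported JUnit4 method reads identically at
    // the call site but resolves to org.junit.Assert.
    assertEquals(4, 2 + 2);

    // Message-first overloads carry over unchanged, as in the
    // checkData() hunk above.
    assertTrue("sum should exceed 3", 2 + 2 > 3);
  }
}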
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java index d21592e485..c61c0b1a85 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java @@ -24,20 +24,25 @@ */ package org.apache.hadoop.hdfs; -import java.io.IOException; -import java.util.ArrayList; -import junit.framework.TestCase; -import org.apache.hadoop.conf.Configuration; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY; -import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.ArrayList; + +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.junit.Test; -public class TestDFSAddressConfig extends TestCase { +public class TestDFSAddressConfig { + @Test public void testDFSAddressConfig() throws IOException { Configuration conf = new HdfsConfiguration(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java index 8f4bb36380..dccc82f1a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java @@ -17,15 +17,15 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.fail; + import java.io.IOException; import java.io.OutputStream; -import org.junit.*; -import static org.junit.Assert.fail; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.junit.Test; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java index b4d9fda8d4..5c5de9db5e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyLong; import static org.mockito.Matchers.anyString; @@ -38,8 +42,6 @@ import java.util.List; import java.util.concurrent.TimeUnit; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; @@ -75,6 +77,7 @@ import 
org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.apache.log4j.Level; +import org.junit.Test; import org.mockito.Mockito; import org.mockito.internal.stubbing.answers.ThrowsException; import org.mockito.invocation.InvocationOnMock; @@ -86,7 +89,7 @@ * These tests make sure that DFSClient retries fetching data from DFS * properly in case of errors. */ -public class TestDFSClientRetries extends TestCase { +public class TestDFSClientRetries { private static final String ADDRESS = "0.0.0.0"; final static private int PING_INTERVAL = 1000; final static private int MIN_SLEEP_TIME = 1000; @@ -146,6 +149,7 @@ private static void writeData(OutputStream out, int len) throws IOException { * This makes sure that when DN closes clients socket after client had * successfully connected earlier, the data can still be fetched. */ + @Test public void testWriteTimeoutAtDataNode() throws IOException, InterruptedException { final int writeTimeout = 100; //milliseconds. @@ -198,6 +202,7 @@ public void testWriteTimeoutAtDataNode() throws IOException, * of times trying to add a block */ @SuppressWarnings("serial") + @Test public void testNotYetReplicatedErrors() throws IOException { final String exceptionMsg = "Nope, not replicated yet..."; @@ -242,6 +247,7 @@ public Object answer(InvocationOnMock invocation) * operation, and not over the lifetime of the stream. It is a regression * test for HDFS-127. */ + @Test public void testFailuresArePerOperation() throws Exception { long fileSize = 4096; @@ -317,6 +323,7 @@ public void testFailuresArePerOperation() throws Exception * a client to safely retry a call and still produce a correct * file. See HDFS-3031. */ + @Test public void testIdempotentAllocateBlockAndClose() throws Exception { final String src = "/testIdempotentAllocateBlock"; Path file = new Path(src); @@ -457,6 +464,7 @@ private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) { /** * Test that a DFSClient waits for random time before retry on busy blocks. */ + @Test public void testDFSClientRetriesOnBusyBlocks() throws IOException { System.out.println("Testing DFSClient random waiting on busy blocks."); @@ -700,6 +708,7 @@ class Counter { public int get() { return counter; } } + @Test public void testGetFileChecksum() throws Exception { final String f = "/testGetFileChecksum"; final Path p = new Path(f); @@ -736,6 +745,7 @@ public void testGetFileChecksum() throws Exception { * RPC to the server and set rpcTimeout to less than n and ensure * that socketTimeoutException is obtained */ + @Test public void testClientDNProtocolTimeout() throws IOException { final Server server = new TestServer(1, true); server.start(); @@ -770,6 +780,7 @@ public void testClientDNProtocolTimeout() throws IOException { * read call, so the client should expect consecutive calls to behave the same * way. See HDFS-3067. */ + @Test public void testRetryOnChecksumFailure() throws UnresolvedLinkException, IOException { HdfsConfiguration conf = new HdfsConfiguration(); @@ -812,6 +823,7 @@ public void testRetryOnChecksumFailure() } /** Test client retry with namenode restarting. 
*/ + @Test public void testNamenodeRestart() throws Exception { ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL); @@ -937,6 +949,7 @@ public void run() { } } + @Test public void testMultipleLinearRandomRetry() { parseMultipleLinearRandomRetry(null, ""); parseMultipleLinearRandomRetry(null, "11"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java index dc3998a77a..28ecf69e9b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java @@ -17,17 +17,21 @@ */ package org.apache.hadoop.hdfs; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + import java.io.File; import java.util.Collections; import java.util.List; -import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; +import org.junit.After; +import org.junit.Test; import com.google.common.collect.Lists; @@ -35,7 +39,7 @@ * This test ensures the appropriate response from the system when * the system is finalized. */ -public class TestDFSFinalize extends TestCase { +public class TestDFSFinalize { private static final Log LOG = LogFactory.getLog( "org.apache.hadoop.hdfs.TestDFSFinalize"); @@ -86,6 +90,7 @@ DATA_NODE, new File(dataNodeDirs[i],"current")), /** * This test attempts to finalize the NameNode and DataNode. */ + @Test public void testFinalize() throws Exception { UpgradeUtilities.initialize(); @@ -125,8 +130,8 @@ public void testFinalize() throws Exception { } // end numDir loop } - @Override - protected void tearDown() throws Exception { + @After + public void tearDown() throws Exception { LOG.info("Shutting down MiniDFSCluster"); if (cluster != null) cluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java index 8a053f8c87..71f0c130bf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java @@ -17,21 +17,27 @@ */ package org.apache.hadoop.hdfs; -import junit.framework.TestCase; -import java.io.*; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.DataOutputStream; +import java.io.FileNotFoundException; +import java.io.IOException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.util.Time; +import org.junit.Test; /** * This class tests that the DFS command mkdirs cannot create subdirectories * from a file when passed an illegal path. HADOOP-281. 
*/ -public class TestDFSMkdirs extends TestCase { +public class TestDFSMkdirs { private void writeFile(FileSystem fileSys, Path name) throws IOException { DataOutputStream stm = fileSys.create(name); @@ -43,6 +49,7 @@ private void writeFile(FileSystem fileSys, Path name) throws IOException { * Tests mkdirs can create a directory that does not exist and will * not create a subdirectory off a file. */ + @Test public void testDFSMkdirs() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); @@ -80,6 +87,7 @@ public void testDFSMkdirs() throws IOException { /** * Tests mkdir will not create directory when parent is missing. */ + @Test public void testMkdir() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java index 23dc0b9a5a..2fef3dcf1d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java @@ -17,14 +17,15 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.HashMap; import java.util.Map; import java.util.Random; -import junit.framework.AssertionFailedError; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -35,13 +36,15 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; /** Unit tests for permission */ -public class TestDFSPermission extends TestCase { +public class TestDFSPermission { public static final Log LOG = LogFactory.getLog(TestDFSPermission.class); final private static Configuration conf = new HdfsConfiguration(); @@ -106,13 +109,13 @@ public class TestDFSPermission extends TestCase { } } - @Override + @Before public void setUp() throws IOException { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); cluster.waitActive(); } - @Override + @After public void tearDown() throws IOException { if (cluster != null) { cluster.shutdown(); @@ -122,6 +125,7 @@ public void tearDown() throws IOException { /** This tests if permission setting in create, mkdir, and * setPermission works correctly */ + @Test public void testPermissionSetting() throws Exception { testPermissionSetting(OpType.CREATE); // test file creation testPermissionSetting(OpType.MKDIRS); // test directory creation @@ -257,6 +261,7 @@ private void checkPermission(Path name, short expectedPermission, * check that ImmutableFsPermission can be used as the argument * to setPermission */ + @Test public void testImmutableFsPermission() throws IOException { fs = FileSystem.get(conf); @@ -266,6 +271,7 @@ public void testImmutableFsPermission() throws 
IOException { } /* check if the ownership of a file/directory is set correctly */ + @Test public void testOwnership() throws Exception { testOwnership(OpType.CREATE); // test file creation testOwnership(OpType.MKDIRS); // test directory creation @@ -354,6 +360,7 @@ private enum OpType {CREATE, MKDIRS, OPEN, SET_REPLICATION, /* Check if namenode performs permission checking correctly for * superuser, file owner, group owner, and other users */ + @Test public void testPermissionChecking() throws Exception { try { fs = FileSystem.get(conf); @@ -533,7 +540,7 @@ void verifyPermission(UserGroupInformation ugi) throws IOException { } catch(AccessControlException e) { assertTrue(expectPermissionDeny()); } - } catch (AssertionFailedError ae) { + } catch (AssertionError ae) { logPermissions(); throw ae; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java index 1b23c5f319..7630dd650c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java @@ -16,6 +16,8 @@ * limitations under the License. */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.DataOutputStream; import java.io.IOException; @@ -26,8 +28,9 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.junit.Test; -public class TestDFSRemove extends junit.framework.TestCase { +public class TestDFSRemove { final Path dir = new Path("/test/remove/"); void list(FileSystem fs, String name) throws IOException { @@ -51,6 +54,7 @@ static long getTotalDfsUsed(MiniDFSCluster cluster) throws IOException { return total; } + @Test public void testRemove() throws Exception { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java index ce1c62b48b..1c00e50993 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java @@ -16,6 +16,9 @@ * limitations under the License. 
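The TestDFSPermission hunks above show the two other recurring changes. First, fixture methods: JUnit 3 ran the inherited protected setUp()/tearDown() overrides, whereas JUnit 4 runs any public methods annotated @Before/@After, so the @Override markers are replaced by annotations. Second, assertion failures: JUnit 4 assertions throw the standard java.lang.AssertionError rather than JUnit 3's junit.framework.AssertionFailedError, so a catch clause that logs diagnostics before rethrowing must change its caught type. A small sketch with invented names, assuming only JUnit 4 on the classpath:

import static org.junit.Assert.assertTrue;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class ExampleFixture {
  private StringBuilder buffer;

  @Before  // was: @Override protected void setUp() throws Exception
  public void setUp() {
    buffer = new StringBuilder();
  }

  @After   // was: @Override protected void tearDown() throws Exception
  public void tearDown() {
    buffer = null;
  }

  @Test
  public void testBufferStartsEmpty() {
    try {
      assertTrue(buffer.length() == 0);
    } catch (AssertionError ae) {  // was: catch (AssertionFailedError ae)
      // diagnostic logging before the rethrow, as TestDFSPermission does
      throw ae;
    }
  }
}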
*/ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import java.io.DataOutputStream; import java.io.IOException; @@ -25,8 +28,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; +import org.junit.Test; -public class TestDFSRename extends junit.framework.TestCase { +public class TestDFSRename { static int countLease(MiniDFSCluster cluster) { return NameNodeAdapter.getLeaseManager(cluster.getNamesystem()).countLease(); } @@ -46,6 +50,7 @@ static void createFile(FileSystem fs, Path f) throws IOException { a_out.close(); } + @Test public void testRename() throws Exception { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java index 253edba2c1..18bd79fd9c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java @@ -19,22 +19,25 @@ import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.fail; import java.io.File; import java.io.IOException; import java.util.Collections; import java.util.List; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.util.StringUtils; +import org.junit.After; +import org.junit.Test; import com.google.common.base.Charsets; import com.google.common.collect.Lists; @@ -44,7 +47,7 @@ * the system when the system is rolled back under various storage state and * version conditions. */ -public class TestDFSRollback extends TestCase { +public class TestDFSRollback { private static final Log LOG = LogFactory.getLog( "org.apache.hadoop.hdfs.TestDFSRollback"); @@ -131,6 +134,7 @@ void startBlockPoolShouldFail(StartupOption operation, String bpid) * This test attempts to rollback the NameNode and DataNode under * a number of valid and invalid conditions. 
*/ + @Test public void testRollback() throws Exception { File[] baseDirs; UpgradeUtilities.initialize(); @@ -299,8 +303,8 @@ private void deleteMatchingFiles(File[] baseDirs, String regex) { } } - @Override - protected void tearDown() throws Exception { + @After + public void tearDown() throws Exception { LOG.info("Shutting down MiniDFSCluster"); if (cluster != null) cluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java index 30cb991745..ce402b1878 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.ByteArrayOutputStream; import java.io.DataOutputStream; import java.io.File; @@ -33,8 +37,6 @@ import java.util.Scanner; import java.util.zip.GZIPOutputStream; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -53,11 +55,12 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; +import org.junit.Test; /** * This class tests commands from DFSShell. */ -public class TestDFSShell extends TestCase { +public class TestDFSShell { private static final Log LOG = LogFactory.getLog(TestDFSShell.class); static final String TEST_ROOT_DIR = @@ -94,6 +97,7 @@ static void show(String s) { System.out.println(Thread.currentThread().getStackTrace()[2] + " " + s); } + @Test public void testZeroSizeFile() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); @@ -136,6 +140,7 @@ public void testZeroSizeFile() throws IOException { } } + @Test public void testRecrusiveRm() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); @@ -161,6 +166,7 @@ public void testRecrusiveRm() throws IOException { } } + @Test public void testDu() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); @@ -210,6 +216,7 @@ public void testDu() throws IOException { } } + @Test public void testPut() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); @@ -308,6 +315,7 @@ public void checkPermission(Permission perm) { /** check command error outputs and exit statuses. 
*/ + @Test public void testErrOutPut() throws Exception { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; @@ -448,6 +456,7 @@ public void testErrOutPut() throws Exception { } } + @Test public void testURIPaths() throws Exception { Configuration srcConf = new HdfsConfiguration(); Configuration dstConf = new HdfsConfiguration(); @@ -540,6 +549,7 @@ public void testURIPaths() throws Exception { } } + @Test public void testText() throws Exception { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; @@ -614,6 +624,7 @@ private void textTest(Path root, Configuration conf) throws Exception { } } + @Test public void testCopyToLocal() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); @@ -711,6 +722,7 @@ static String createTree(FileSystem fs, String name) throws IOException { return path; } + @Test public void testCount() throws Exception { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); @@ -877,6 +889,7 @@ private void confirmOwner(String owner, String group, } } + @Test public void testFilePermissions() throws IOException { Configuration conf = new HdfsConfiguration(); @@ -942,6 +955,7 @@ public void testFilePermissions() throws IOException { /** * Tests various options of DFSShell. */ + @Test public void testDFSShell() throws IOException { Configuration conf = new HdfsConfiguration(); /* This tests some properties of ChecksumFileSystem as well. @@ -1209,6 +1223,7 @@ static interface TestGetRunner { String run(int exitcode, String... options) throws IOException; } + @Test public void testRemoteException() throws Exception { UserGroupInformation tmpUGI = UserGroupInformation.createUserForTesting("tmpname", new String[] {"mygroup"}); @@ -1252,6 +1267,7 @@ public Object run() throws Exception { } } + @Test public void testGet() throws IOException { DFSTestUtil.setLogLevel2All(FSInputChecker.LOG); final Configuration conf = new HdfsConfiguration(); @@ -1312,6 +1328,7 @@ public String run(int exitcode, String... options) throws IOException { } } + @Test public void testLsr() throws Exception { final Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); @@ -1369,6 +1386,7 @@ private static String runLsr(final FsShell shell, String root, int returnvalue * and return -1 exit code. 
* @throws Exception */ + @Test public void testInvalidShell() throws Exception { Configuration conf = new Configuration(); // default FS (non-DFS) DFSAdmin admin = new DFSAdmin(); @@ -1378,6 +1396,7 @@ public void testInvalidShell() throws Exception { } // force Copy Option is -f + @Test public void testCopyCommandsWithForceOption() throws Exception { Configuration conf = new Configuration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java index 402c6e804a..2b37e2e3cf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java @@ -17,22 +17,24 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.io.PrintWriter; -import junit.framework.TestCase; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.util.ToolRunner; +import org.junit.Test; -public class TestDFSShellGenericOptions extends TestCase { +public class TestDFSShellGenericOptions { + @Test public void testDFSCommand() throws IOException { String namenode = null; MiniDFSCluster cluster = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java index 8a96c89a3d..797d5ca38c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java @@ -19,25 +19,27 @@ import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.File; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.server.common.Storage; -import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.Storage; +import org.apache.hadoop.hdfs.server.common.StorageInfo; +import org.junit.After; +import org.junit.Test; /** * This test ensures the appropriate response (successful or failure) from * a Datanode when the system is started with differing version combinations. 
*/ -public class TestDFSStartupVersions extends TestCase { +public class TestDFSStartupVersions { private static final Log LOG = LogFactory.getLog( "org.apache.hadoop.hdfs.TestDFSStartupVersions"); @@ -235,6 +237,7 @@ boolean isVersionCompatible(StorageData namenodeSd, StorageData datanodeSd) { * this iterations version 3-tuple * */ + @Test public void testVersions() throws Exception { UpgradeUtilities.initialize(); Configuration conf = UpgradeUtilities.initializeStorageStateConf(1, @@ -276,8 +279,8 @@ public void testVersions() throws Exception { } } - @Override - protected void tearDown() throws Exception { + @After + public void tearDown() throws Exception { LOG.info("Shutting down MiniDFSCluster"); if (cluster != null) cluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java index 80a415e4d4..a3d54c00c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java @@ -17,25 +17,32 @@ */ package org.apache.hadoop.hdfs; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.IOException; -import junit.framework.TestCase; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; - -import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; -import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; /** * This test ensures the appropriate response (successful or failure) from * the system when the system is started under various storage state and * version conditions. */ -public class TestDFSStorageStateRecovery extends TestCase { +public class TestDFSStorageStateRecovery { private static final Log LOG = LogFactory.getLog( "org.apache.hadoop.hdfs.TestDFSStorageStateRecovery"); @@ -311,6 +318,7 @@ private MiniDFSCluster createCluster(Configuration c) throws IOException { * This test iterates over the testCases table and attempts * to startup the NameNode normally. */ + @Test public void testNNStorageStates() throws Exception { String[] baseDirs; @@ -354,6 +362,7 @@ public void testNNStorageStates() throws Exception { * This test iterates over the testCases table for Datanode storage and * attempts to startup the DataNode normally. */ + @Test public void testDNStorageStates() throws Exception { String[] baseDirs; @@ -394,6 +403,7 @@ public void testDNStorageStates() throws Exception { * This test iterates over the testCases table for block pool storage and * attempts to startup the DataNode normally. 
*/ + @Test public void testBlockPoolStorageStates() throws Exception { String[] baseDirs; @@ -431,15 +441,15 @@ public void testBlockPoolStorageStates() throws Exception { } // end numDirs loop } - @Override - protected void setUp() throws Exception { + @Before + public void setUp() throws Exception { LOG.info("Setting up the directory structures."); UpgradeUtilities.initialize(); } - @Override - protected void tearDown() throws Exception { + @After + public void tearDown() throws Exception { LOG.info("Shutting down MiniDFSCluster"); if (cluster != null) cluster.shutdown(); } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java index ad3e6d8c55..b087968363 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java @@ -19,6 +19,13 @@ import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; +import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName; +import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName; +import static org.apache.hadoop.test.GenericTestUtils.assertExists; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.File; import java.io.IOException; @@ -27,14 +34,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.TestParallelImageWrite; -import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName; -import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName; - -import static org.apache.hadoop.test.GenericTestUtils.assertExists; import org.apache.hadoop.util.StringUtils; import org.junit.BeforeClass; import org.junit.Ignore; @@ -43,8 +46,6 @@ import com.google.common.base.Charsets; import com.google.common.base.Joiner; -import static org.junit.Assert.*; - /** * This test ensures the appropriate response (successful or failure) from * the system when the system is upgraded under various storage state and diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java index ba92c569d9..8db1741e82 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java @@ -18,13 +18,22 @@ package org.apache.hadoop.hdfs; -import junit.framework.TestCase; -import java.io.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.BufferedReader; +import 
java.io.File; +import java.io.FileOutputStream; +import java.io.FileReader; +import java.io.IOException; import java.util.Iterator; import java.util.LinkedList; import java.util.TreeMap; import java.util.zip.CRC32; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSInputStream; import org.apache.hadoop.fs.FileStatus; @@ -34,8 +43,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.util.StringUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.junit.Test; /** * This tests data transfer protocol handling in the Datanode. It sends @@ -46,7 +54,7 @@ * 2) hadoop-dfs-dir.txt : checksums that are compared in this test. * Please read hadoop-dfs-dir.txt for more information. */ -public class TestDFSUpgradeFromImage extends TestCase { +public class TestDFSUpgradeFromImage { private static final Log LOG = LogFactory .getLog(TestDFSUpgradeFromImage.class); @@ -182,6 +190,7 @@ private void verifyFileSystem(DistributedFileSystem dfs) throws IOException { * Test that sets up a fake image from Hadoop 0.3.0 and tries to start a * NN, verifying that the correct error message is thrown. */ + @Test public void testFailOnPreUpgradeImage() throws IOException { Configuration conf = new HdfsConfiguration(); @@ -225,6 +234,7 @@ public void testFailOnPreUpgradeImage() throws IOException { /** * Test upgrade from 0.22 image */ + @Test public void testUpgradeFromRel22Image() throws IOException { unpackStorage(HADOOP22_IMAGE); upgradeAndVerify(); @@ -234,6 +244,7 @@ public void testUpgradeFromRel22Image() throws IOException { * Test upgrade from 0.22 image with corrupt md5, make sure it * fails to upgrade */ + @Test public void testUpgradeFromCorruptRel22Image() throws IOException { unpackStorage(HADOOP22_IMAGE); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java index 6a0f4e76ce..af1f6d6ea5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java @@ -18,10 +18,22 @@ package org.apache.hadoop.hdfs; -import org.junit.Before; -import org.junit.Test; - -import static org.junit.Assert.*; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID; +import static org.junit.Assert.assertEquals; +import static 
org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.IOException; import java.net.InetSocketAddress; @@ -34,18 +46,18 @@ import java.util.Map; import org.apache.hadoop.HadoopIllegalArgumentException; -import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.CommonConfigurationKeys; - -import static org.apache.hadoop.hdfs.DFSConfigKeys.*; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.UserGroupInformation; +import org.junit.Before; +import org.junit.Test; public class TestDFSUtil { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java index 1ef4eac997..5699c10171 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java @@ -20,7 +20,10 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.InputStream; import java.io.PrintWriter; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java index 6db59d281f..a766263707 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java @@ -18,6 +18,10 @@ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; @@ -28,8 +32,6 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -40,11 +42,12 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.Time; +import org.junit.Test; /** * This test verifies 
that block verification occurs on the datanode */ -public class TestDatanodeBlockScanner extends TestCase { +public class TestDatanodeBlockScanner { private static final Log LOG = LogFactory.getLog(TestDatanodeBlockScanner.class); @@ -118,6 +121,7 @@ private static long waitForVerification(int infoPort, FileSystem fs, return verificationTime; } + @Test public void testDatanodeBlockScanner() throws IOException, TimeoutException { long startTime = Time.now(); @@ -168,6 +172,7 @@ public static boolean corruptReplica(ExtendedBlock blk, int replica) throws IOEx return MiniDFSCluster.corruptReplica(replica, blk); } + @Test public void testBlockCorruptionPolicy() throws IOException { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L); @@ -232,12 +237,14 @@ public void testBlockCorruptionPolicy() throws IOException { * 4. Test again waits until the block is reported with expected number * of good replicas. */ + @Test public void testBlockCorruptionRecoveryPolicy1() throws Exception { // Test recovery of 1 corrupt replica LOG.info("Testing corrupt replica recovery for one corrupt replica"); blockCorruptionRecoveryPolicy(4, (short)3, 1); } + @Test public void testBlockCorruptionRecoveryPolicy2() throws Exception { // Test recovery of 2 corrupt replicas LOG.info("Testing corrupt replica recovery for two corrupt replicas"); @@ -302,6 +309,7 @@ private void blockCorruptionRecoveryPolicy(int numDataNodes, } /** Test if NameNode handles truncated blocks in block report */ + @Test public void testTruncatedBlockReport() throws Exception { final Configuration conf = new HdfsConfiguration(); final short REPLICATION_FACTOR = (short)2; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java index 5383fa5437..a6625431fd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java @@ -17,9 +17,10 @@ */ package org.apache.hadoop.hdfs; -import java.io.IOException; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; -import junit.framework.TestCase; +import java.io.IOException; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; @@ -31,17 +32,18 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.LeaseManager; import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; import org.apache.log4j.Level; +import org.junit.Test; /** * This class tests that pipelines survive data node death and recovery. 
*/ -public class TestDatanodeDeath extends TestCase { +public class TestDatanodeDeath { { ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL); ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL); @@ -410,11 +412,15 @@ private void simpleTest(int datanodeToKill) throws IOException { } } + @Test public void testSimple0() throws IOException {simpleTest(0);} + @Test public void testSimple1() throws IOException {simpleTest(1);} + @Test public void testSimple2() throws IOException {simpleTest(2);} + @Test public void testComplex() throws IOException {complexTest();} } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java index 71c898bf51..4d32b1f6a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java @@ -19,7 +19,8 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; -import static org.mockito.Mockito.*; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; import java.net.InetSocketAddress; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java index a27739ec76..d5802b0b54 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java @@ -17,27 +17,30 @@ */ package org.apache.hadoop.hdfs; +import static org.apache.hadoop.test.MetricsAsserts.assertGauge; +import static org.apache.hadoop.test.MetricsAsserts.getMetrics; +import static org.junit.Assert.assertEquals; + import java.net.InetSocketAddress; import java.util.ArrayList; -import junit.framework.TestCase; import org.apache.hadoop.conf.Configuration; - import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import static org.apache.hadoop.test.MetricsAsserts.*; +import org.junit.Test; /** * This test ensures that all types of data node report work correctly. */ -public class TestDatanodeReport extends TestCase { +public class TestDatanodeReport { final static private Configuration conf = new HdfsConfiguration(); final static private int NUM_OF_DATANODES = 4; /** * This test attempts to fetch different types of datanode report.
*/ + @Test public void testDatanodeReport() throws Exception { conf.setInt( DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); // 0.5s diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java index 6a66e947e7..27f13e3d25 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java @@ -17,18 +17,20 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; + import java.net.InetSocketAddress; import java.net.URI; -import junit.framework.TestCase; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.junit.Test; /** Test NameNode port defaulting code. */ -public class TestDefaultNameNodePort extends TestCase { +public class TestDefaultNameNodePort { + @Test public void testGetAddressFromString() throws Exception { assertEquals(NameNode.getAddress("foo").getPort(), NameNode.DEFAULT_PORT); @@ -40,6 +42,7 @@ public void testGetAddressFromString() throws Exception { 555); } + @Test public void testGetAddressFromConf() throws Exception { Configuration conf = new HdfsConfiguration(); FileSystem.setDefaultUri(conf, "hdfs://foo/"); @@ -50,6 +53,7 @@ public void testGetAddressFromConf() throws Exception { assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT); } + @Test public void testGetUri() { assertEquals(NameNode.getUri(new InetSocketAddress("foo", 555)), URI.create("hdfs://foo:555")); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java index 518adddf10..7152092245 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java @@ -18,13 +18,15 @@ package org.apache.hadoop.hdfs; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.conf.Configuration; -import junit.framework.TestCase; +import static org.junit.Assert.assertTrue; -public class TestDeprecatedKeys extends TestCase { +import org.apache.hadoop.conf.Configuration; +import org.junit.Test; + +public class TestDeprecatedKeys { //Tests a deprecated key + @Test public void testDeprecatedKeys() throws Exception { Configuration conf = new HdfsConfiguration(); conf.set("topology.script.file.name", "xyz"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java index fea024c2c7..1faff65727 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java @@ -17,14 +17,16 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.RandomAccessFile; import java.util.Random; -import 
junit.framework.TestCase; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -35,11 +37,12 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.IOUtils; +import org.junit.Test; /** * This class tests if FSInputChecker works correctly. */ -public class TestFSInputChecker extends TestCase { +public class TestFSInputChecker { static final long seed = 0xDEADBEEFL; static final int BYTES_PER_SUM = 10; static final int BLOCK_SIZE = 2*BYTES_PER_SUM; @@ -291,6 +294,7 @@ private void checkFileCorruption(LocalFileSystem fileSys, Path file, in.close(); } + @Test public void testFSInputChecker() throws Exception { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java index da18bbe0cc..a3b3f808eb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java @@ -17,21 +17,24 @@ */ package org.apache.hadoop.hdfs; -import junit.framework.TestCase; -import java.io.*; -import java.util.Random; -import org.apache.hadoop.conf.Configuration; -import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import java.io.IOException; +import java.util.Random; + +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.junit.Test; /** * This class tests if FSOutputSummer works correctly. */ -public class TestFSOutputSummer extends TestCase { +public class TestFSOutputSummer { private static final long seed = 0xDEADBEEFL; private static final int BYTES_PER_CHECKSUM = 10; private static final int BLOCK_SIZE = 2*BYTES_PER_CHECKSUM; @@ -111,6 +114,7 @@ private void cleanupFile(Path name) throws IOException { /** * Test write operation for output stream in DFS.
*/ + @Test public void testFSOutputSummer() throws Exception { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java index 20a7b5b983..f488040c49 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java @@ -17,12 +17,16 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import junit.framework.TestCase; - +import org.apache.commons.logging.LogFactory; +import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -36,16 +40,14 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; - -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.impl.Log4JLogger; import org.apache.log4j.Level; +import org.junit.Test; /** * This class tests the building blocks that are needed to * support HDFS appends. */ -public class TestFileAppend2 extends TestCase { +public class TestFileAppend2 { { ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL); @@ -79,6 +81,7 @@ public class TestFileAppend2 extends TestCase { * Verify that all data exists in file. * @throws IOException an exception might be thrown */ + @Test public void testSimpleAppend() throws IOException { final Configuration conf = new HdfsConfiguration(); if (simulatedStorage) { @@ -329,6 +332,7 @@ public void run() { * Test that appends to files at random offsets. * @throws IOException an exception might be thrown */ + @Test public void testComplexAppend() throws IOException { fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE); Configuration conf = new HdfsConfiguration(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java index 038edd8d2b..a2ab1edda2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; @@ -42,7 +46,6 @@ import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; -import static org.junit.Assert.*; /** This class implements some of the tests posted in HADOOP-2658.
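The converted append tests keep JUnit 3's idiom for expected failures, which is why fail is now statically imported: call the operation, call fail() if it returns, and catch the expected exception type. JUnit 4 also allows declaring the expectation on the annotation itself when nothing about the exception needs to be inspected. A hypothetical sketch, not code from this patch:

import java.io.IOException;

import org.junit.Test;

public class ExampleExpectedException {

  // Passes only if an IOException escapes the method body.
  @Test(expected = IOException.class)
  public void testOperationFails() throws IOException {
    throw new IOException("simulated failure");
  }
}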
*/ public class TestFileAppend3 { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java index 6b18965687..d086c77a9b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java @@ -23,7 +23,6 @@ import java.io.File; import java.io.IOException; import java.util.EnumMap; -import java.util.Random; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; @@ -32,8 +31,6 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; -import org.apache.hadoop.hdfs.server.namenode.FSEditLog; -import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.hdfs.server.namenode.NNStorage; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java index 632d8cc35e..fae302d5c7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java @@ -17,6 +17,11 @@ */ package org.apache.hadoop.hdfs; +import java.io.IOException; +import java.util.Arrays; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; @@ -33,10 +38,6 @@ import org.apache.log4j.Level; import org.apache.log4j.Logger; -import java.io.IOException; -import java.util.*; -import java.util.concurrent.atomic.*; - /** * This class tests cases of concurrent reads/writes to a file; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java index a8624400aa..458880af56 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java @@ -18,14 +18,16 @@ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.File; import java.io.FileOutputStream; import java.util.ArrayList; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; @@ -42,11 +44,12 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.log4j.Level; +import org.junit.Test; /** * A JUnit test for corrupted file handling.
*/ -public class TestFileCorruption extends TestCase { +public class TestFileCorruption { { ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL); ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL); @@ -56,6 +59,7 @@ public class TestFileCorruption extends TestCase { static Log LOG = ((Log4JLogger)NameNode.stateChangeLog); /** check if DFS can handle corrupted blocks properly */ + @Test public void testFileCorruption() throws Exception { MiniDFSCluster cluster = null; DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFileCorruption"). @@ -88,6 +92,7 @@ public void testFileCorruption() throws Exception { } /** check if local FS can handle corrupted blocks properly */ + @Test public void testLocalFileCorruption() throws Exception { Configuration conf = new HdfsConfiguration(); Path file = new Path(System.getProperty("test.build.data"), "corruptFile"); @@ -114,6 +119,7 @@ public void testLocalFileCorruption() throws Exception { * in blocksMap. Make sure that ArrayIndexOutOfBoundsException is not thrown. * See Hadoop-4351. */ + @Test public void testArrayOutOfBoundsException() throws Exception { MiniDFSCluster cluster = null; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java index c2f630c45b..77eb3f8657 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java @@ -16,7 +16,6 @@ * limitations under the License. */ package org.apache.hadoop.hdfs; - import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT; @@ -31,6 +30,9 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import static org.junit.Assume.assumeTrue; import java.io.BufferedReader; @@ -70,11 +72,12 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.Time; import org.apache.log4j.Level; +import org.junit.Test; /** * This class tests various cases during file creation.
*/ -public class TestFileCreation extends junit.framework.TestCase { +public class TestFileCreation { static final String DIR = "/" + TestFileCreation.class.getSimpleName() + "/"; { @@ -123,6 +126,7 @@ public static void writeFile(FSDataOutputStream stm, int size) throws IOExceptio /** * Test that server default values can be retrieved on the client side */ + @Test public void testServerDefaults() throws IOException { Configuration conf = new HdfsConfiguration(); conf.setLong(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT); @@ -148,11 +152,13 @@ public void testServerDefaults() throws IOException { } } + @Test public void testFileCreation() throws IOException { checkFileCreation(null); } /** Same test but the client should bind to a local interface */ + @Test public void testFileCreationSetLocalInterface() throws IOException { assumeTrue(System.getProperty("os.name").startsWith("Linux")); @@ -255,6 +261,7 @@ public void checkFileCreation(String netIf) throws IOException { /** * Test deleteOnExit */ + @Test public void testDeleteOnExit() throws IOException { Configuration conf = new HdfsConfiguration(); if (simulatedStorage) { @@ -317,6 +324,7 @@ public void testDeleteOnExit() throws IOException { /** * Test that file data does not become corrupted even in the face of errors. */ + @Test public void testFileCreationError1() throws IOException { Configuration conf = new HdfsConfiguration(); conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000); @@ -389,6 +397,7 @@ public void testFileCreationError1() throws IOException { * Test that the filesystem removes the last block from a file if its * lease expires. */ + @Test public void testFileCreationError2() throws IOException { long leasePeriod = 1000; System.out.println("testFileCreationError2 start"); @@ -454,6 +463,7 @@ public void testFileCreationError2() throws IOException { } /** test addBlock(..) 
when replication map = new HashMap(); final Random RAN = new Random(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java index 9051fd2d77..036252ddc3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java @@ -17,25 +17,26 @@ */ package org.apache.hadoop.hdfs; +import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.IOException; import java.net.UnknownHostException; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; -import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.namenode.BackupNode; import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.net.DNS; +import org.junit.Test; /** * This test checks correctness of port usage by hdfs components: @@ -47,7 +48,7 @@ * - if the port = 0 (ephemeral) then the server should choose * a free port and start on it. */ -public class TestHDFSServerPorts extends TestCase { +public class TestHDFSServerPorts { public static final Log LOG = LogFactory.getLog(TestHDFSServerPorts.class); // reset default 0.0.0.0 addresses in order to avoid IPv6 problem @@ -250,6 +251,7 @@ private boolean canStartBackupNode(Configuration conf) throws IOException { return true; } + @Test public void testNameNodePorts() throws Exception { runTestNameNodePorts(false); runTestNameNodePorts(true); @@ -300,6 +302,7 @@ public void runTestNameNodePorts(boolean withService) throws Exception { /** * Verify datanode port usage. */ + @Test public void testDataNodePorts() throws Exception { NameNode nn = null; try { @@ -335,6 +338,7 @@ public void testDataNodePorts() throws Exception { /** * Verify secondary namenode port usage. */ + @Test public void testSecondaryNodePorts() throws Exception { NameNode nn = null; try { @@ -363,6 +367,7 @@ public void testSecondaryNodePorts() throws Exception { /** * Verify BackupNode port usage. 
*/ + @Test public void testBackupNodePorts() throws Exception { NameNode nn = null; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java index 2a34cf9d03..feaca8c996 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java @@ -17,6 +17,13 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.io.InterruptedIOException; + import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; @@ -24,15 +31,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.log4j.Level; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; import org.junit.Test; -import java.io.InterruptedIOException; -import java.io.IOException; - /** Class contains a set of tests to verify the correctness of * newly introduced {@link FSDataOutputStream#hflush()} method */ public class TestHFlush { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java index 66e8e85e61..bdf4c02d4b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hdfs; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.IOException; import java.net.URI; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java index 8417a53853..e7df010213 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java @@ -18,15 +18,16 @@ package org.apache.hadoop.hdfs; -import static - org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; import java.io.IOException; import java.lang.reflect.Field; import java.net.URI; import java.security.PrivilegedExceptionAction; -import org.junit.Test; -import static org.junit.Assert.*; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -36,6 +37,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; +import org.junit.Test; public class TestHftpDelegationToken { diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java index 57bfe58c3f..5e20d46e2c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java @@ -18,19 +18,18 @@ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.io.InputStream; -import java.net.URISyntaxException; -import java.net.URI; -import java.net.URL; import java.net.HttpURLConnection; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; import java.util.Random; -import org.junit.Test; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import static org.junit.Assert.*; - import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; @@ -38,12 +37,14 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.util.ServletUtil; import org.apache.log4j.Level; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; public class TestHftpFileSystem { private static final Random RAN = new Random(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java index 483d184d92..0f8d7d00e4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java @@ -27,7 +27,6 @@ import java.net.ServerSocket; import java.net.SocketTimeoutException; import java.net.URI; -import java.net.URLConnection; import java.util.LinkedList; import java.util.List; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java index fa029409b7..ab28ce27d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java @@ -17,12 +17,12 @@ */ package org.apache.hadoop.hdfs; -import junit.framework.TestCase; -import java.io.*; +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import java.net.InetSocketAddress; import java.util.HashSet; import java.util.Set; -import java.net.*; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -37,12 +37,13 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.util.Time; +import org.junit.Test; /** * This class tests the replication and injection of blocks of a DFS file for 
simulated storage. */ -public class TestInjectionForSimulatedStorage extends TestCase { +public class TestInjectionForSimulatedStorage { private int checksumSize = 16; private int blockSize = checksumSize*2; private int numBlocks = 4; @@ -122,6 +123,7 @@ private void waitForBlockReplication(String filename, * The blocks are then injected in one of the DNs. The expected behaviour is * that the NN will arrange for the missing replica to be copied from a valid source. */ + @Test public void testInjection() throws IOException { MiniDFSCluster cluster = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java index 65a0465bd4..1f42c0d4de 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java @@ -17,11 +17,12 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.net.InetSocketAddress; -import junit.framework.Assert; - import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB; import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB; @@ -78,16 +79,16 @@ public void testNamenodeProtocol() throws IOException { nnAddress, NamenodeProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy(); boolean exists = translator.isMethodSupported("rollEditLog"); - Assert.assertTrue(exists); + assertTrue(exists); exists = translator.isMethodSupported("bogusMethod"); - Assert.assertFalse(exists); + assertFalse(exists); } @Test public void testDatanodeProtocol() throws IOException { DatanodeProtocolClientSideTranslatorPB translator = new DatanodeProtocolClientSideTranslatorPB(nnAddress, conf); - Assert.assertTrue(translator.isMethodSupported("sendHeartbeat")); + assertTrue(translator.isMethodSupported("sendHeartbeat")); } @Test @@ -97,12 +98,12 @@ public void testClientDatanodeProtocol() throws IOException { UserGroupInformation.getCurrentUser(), conf, NetUtils.getDefaultSocketFactory(conf)); //Namenode doesn't implement ClientDatanodeProtocol - Assert.assertFalse(translator.isMethodSupported("refreshNamenodes")); + assertFalse(translator.isMethodSupported("refreshNamenodes")); translator = new ClientDatanodeProtocolTranslatorPB( dnAddress, UserGroupInformation.getCurrentUser(), conf, NetUtils.getDefaultSocketFactory(conf)); - Assert.assertTrue(translator.isMethodSupported("refreshNamenodes")); + assertTrue(translator.isMethodSupported("refreshNamenodes")); } @Test @@ -111,7 +112,7 @@ public void testClientNamenodeProtocol() throws IOException { (ClientNamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy( conf, nnAddress, ClientProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy(); - Assert.assertTrue(translator.isMethodSupported("mkdirs")); + assertTrue(translator.isMethodSupported("mkdirs")); } @Test @@ -120,7 +121,7 @@ public void tesJournalProtocol() throws IOException { NameNodeProxies.createNonHAProxy(conf, nnAddress, JournalProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy(); //Namenode doesn't implement JournalProtocol - Assert.assertFalse(translator.isMethodSupported("startLogSegment")); +
assertFalse(translator.isMethodSupported("startLogSegment")); } @Test @@ -130,12 +131,12 @@ public void testInterDatanodeProtocol() throws IOException { nnAddress, UserGroupInformation.getCurrentUser(), conf, NetUtils.getDefaultSocketFactory(conf), 0); //Not supported at namenode - Assert.assertFalse(translator.isMethodSupported("initReplicaRecovery")); + assertFalse(translator.isMethodSupported("initReplicaRecovery")); translator = new InterDatanodeProtocolTranslatorPB( dnAddress, UserGroupInformation.getCurrentUser(), conf, NetUtils.getDefaultSocketFactory(conf), 0); - Assert.assertTrue(translator.isMethodSupported("initReplicaRecovery")); + assertTrue(translator.isMethodSupported("initReplicaRecovery")); } @Test @@ -145,7 +146,7 @@ public void testGetUserMappingsProtocol() throws IOException { NameNodeProxies.createNonHAProxy(conf, nnAddress, GetUserMappingsProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy(); - Assert.assertTrue(translator.isMethodSupported("getGroupsForUser")); + assertTrue(translator.isMethodSupported("getGroupsForUser")); } @Test @@ -155,7 +156,7 @@ public void testRefreshAuthorizationPolicyProtocol() throws IOException { NameNodeProxies.createNonHAProxy(conf, nnAddress, RefreshAuthorizationPolicyProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy(); - Assert.assertTrue(translator.isMethodSupported("refreshServiceAcl")); + assertTrue(translator.isMethodSupported("refreshServiceAcl")); } @Test @@ -165,7 +166,7 @@ public void testRefreshUserMappingsProtocol() throws IOException { NameNodeProxies.createNonHAProxy(conf, nnAddress, RefreshUserMappingsProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy(); - Assert.assertTrue( + assertTrue( translator.isMethodSupported("refreshUserToGroupsMappings")); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java index b66bf37ab4..3ca1cf3ec2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.Arrays; @@ -30,7 +32,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.junit.Test; -import static org.junit.Assert.assertTrue; /** * This class tests that blocks can be larger than 2GB diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java index b6bc837fec..82e25c3104 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java @@ -17,34 +17,31 @@ */ package org.apache.hadoop.hdfs; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.spy; + import java.io.DataOutputStream; import java.io.IOException; import java.security.PrivilegedExceptionAction; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; import 
org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.util.Time; import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; -import static org.mockito.Matchers.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.doNothing; public class TestLease { static boolean hasLease(MiniDFSCluster cluster, Path src) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java index 98b8bdf592..e739f61afa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java @@ -16,6 +16,8 @@ * limitations under the License. */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.IOException; @@ -31,8 +33,9 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestInterDatanodeProtocol; import org.apache.hadoop.hdfs.server.namenode.LeaseManager; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; +import org.junit.Test; -public class TestLeaseRecovery extends junit.framework.TestCase { +public class TestLeaseRecovery { static final int BLOCK_SIZE = 1024; static final short REPLICATION_NUM = (short)3; private static final long LEASE_PERIOD = 300L; @@ -66,6 +69,7 @@ void waitLeaseRecovery(MiniDFSCluster cluster) { * It randomly truncates the replica of the last block stored in each datanode. * Finally, it triggers block synchronization to synchronize all stored blocks.
*/ + @Test public void testBlockSynchronization() throws Exception { final int ORG_FILE_SIZE = 3000; Configuration conf = new HdfsConfiguration(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java index d5126e2e5e..928dbbb6a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java @@ -17,12 +17,11 @@ */ package org.apache.hadoop.hdfs; -import static org.junit.Assert.*; +import static org.junit.Assert.assertSame; import java.io.IOException; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java index 797130c1aa..ec9e7e2e48 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java @@ -22,7 +22,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.TestListFiles; import org.apache.log4j.Level; - import org.junit.AfterClass; import org.junit.BeforeClass; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java index 2e3f6810ec..557f3ac09c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.EnumSet; import java.util.Random; @@ -33,13 +37,10 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.log4j.Level; - -import static org.junit.Assert.*; - import org.junit.After; import org.junit.AfterClass; -import org.junit.Test; import org.junit.BeforeClass; +import org.junit.Test; /** * This class tests the FileStatus API. 
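
The hunks above repeat one mechanical conversion: drop "extends TestCase", annotate every test method with @Test, and resolve assertions through explicit static imports instead of inheritance from junit.framework.TestCase. As a minimal before/after sketch of the pattern, using a hypothetical class rather than one from this patch:

    // JUnit 3 style (before): tests are discovered by the "test" name
    // prefix and assert* methods are inherited from TestCase.
    //
    //   public class TestExample extends junit.framework.TestCase {
    //     public void testSomething() { assertTrue(1 + 1 == 2); }
    //   }

    // JUnit 4 style (after): discovery is annotation driven and the
    // assertions come from explicit static imports.
    import static org.junit.Assert.assertTrue;

    import org.junit.Test;

    public class TestExample {
      @Test
      public void testSomething() {
        assertTrue(1 + 1 == 2); // resolved via the static import above
      }
    }
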
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java index 32af360920..7a5e3d046a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java @@ -17,12 +17,17 @@ */ package org.apache.hadoop.hdfs; -import org.junit.Test; -import static org.junit.Assert.*; -import java.io.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.junit.Test; /** * This class tests the DFS class via the FileSystem interface in a single node diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java index 0eec0d1877..a7fd82aea0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java @@ -18,15 +18,17 @@ package org.apache.hadoop.hdfs; -import junit.framework.Assert; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.FSConstants; import org.junit.After; import org.junit.Before; import org.junit.Test; -import java.io.File; - /** * Tests MiniDFS cluster setup/teardown and isolation. 
* Every instance is brought up with a new data dir, to ensure that @@ -70,7 +72,7 @@ public void testClusterWithoutSystemProperties() throws Throwable { conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c1Path); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); try { - Assert.assertEquals(c1Path+"/data", cluster.getDataDirectory()); + assertEquals(c1Path+"/data", cluster.getDataDirectory()); } finally { cluster.shutdown(); } @@ -91,14 +93,14 @@ public void testDualClusters() throws Throwable { MiniDFSCluster cluster3 = null; try { String dataDir2 = cluster2.getDataDirectory(); - Assert.assertEquals(c2Path + "/data", dataDir2); + assertEquals(c2Path + "/data", dataDir2); //change the data dir conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataCluster3.getAbsolutePath()); MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf); cluster3 = builder.build(); String dataDir3 = cluster3.getDataDirectory(); - Assert.assertTrue("Clusters are bound to the same directory: " + dataDir2, + assertTrue("Clusters are bound to the same directory: " + dataDir2, !dataDir2.equals(dataDir3)); } finally { MiniDFSCluster.shutdownCluster(cluster3); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java index 26f42631fd..5516ced5fe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java @@ -17,11 +17,13 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.net.URL; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -30,17 +32,19 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; +import org.junit.Test; /** * The test makes sure that NameNode detects the presence of blocks that do not have * any valid replicas. In addition, it verifies that HDFS front page displays * a warning in such a case.
*/ -public class TestMissingBlocksAlert extends TestCase { +public class TestMissingBlocksAlert { private static final Log LOG = LogFactory.getLog(TestMissingBlocksAlert.class); + @Test public void testMissingBlocksAlert() throws IOException, InterruptedException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java index 5ba5681a58..f30de965fc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java @@ -17,23 +17,27 @@ */ package org.apache.hadoop.hdfs; -import junit.framework.TestCase; -import java.io.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.net.InetSocketAddress; import java.util.Random; -import java.net.*; + import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.junit.Test; /** * This class tests the modification time on files. */ -public class TestModTime extends TestCase { +public class TestModTime { static final long seed = 0xDEADBEEFL; static final int blockSize = 8192; static final int fileSize = 16384; @@ -74,6 +78,7 @@ private void printDatanodeReport(DatanodeInfo[] info) { /** * Tests modification time in DFS. */ + @Test public void testModTime() throws IOException { Configuration conf = new HdfsConfiguration(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java index 61c93fe6f0..8d320903a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java @@ -17,9 +17,7 @@ */ package org.apache.hadoop.hdfs; -import org.junit.Test; - -import java.io.*; +import java.io.IOException; import java.util.ArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; @@ -28,15 +26,16 @@ import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.LeaseManager; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; import org.apache.log4j.Level; +import org.junit.Test; /** * This class tests hflushing concurrently from many threads.
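
Beyond the class-level changes, the import reshuffles in these files converge on one ordering: static imports first, then java.*, then org.* with org.junit last, and wildcard forms such as "import static org.junit.Assert.*;" expanded into explicit names. One detail worth checking while reviewing the rewritten assertions: org.junit.Assert.assertEquals is specified as (expected, actual), so failure messages only read correctly when the expected value comes first. A small illustrative fragment, not taken from the patch:

    import static org.junit.Assert.assertEquals;

    import java.io.File;

    import org.junit.Test;

    public class TestAssertArgumentOrder {
      @Test
      public void testExpectedValueComesFirst() {
        File dir = new File("/tmp/data");
        // (expected, actual): on mismatch JUnit reports
        // "expected:<data> but was:<...>", which only makes sense
        // with the arguments in this order.
        assertEquals("data", dir.getName());
      }
    }
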
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java index e3fce6e0fa..dacd4bca99 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java @@ -17,21 +17,22 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + import java.io.IOException; import java.nio.ByteBuffer; import java.util.Random; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.util.Time; import org.apache.log4j.Level; import org.apache.log4j.LogManager; -import static org.junit.Assert.*; - /** * Driver class for testing the use of DFSInputStream by multiple concurrent * readers, using the different read APIs. See subclasses for the actual test diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java index 6378d42644..55695415d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java @@ -17,24 +17,25 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.DataOutputStream; import java.io.IOException; import java.util.Random; -import junit.framework.TestCase; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; +import org.junit.Test; /** * This class tests the DFS positional read functionality in a single node * mini-cluster. */ -public class TestPread extends TestCase { +public class TestPread { static final long seed = 0xDEADBEEFL; static final int blockSize = 4096; boolean simulatedStorage = false; @@ -196,6 +197,7 @@ private void cleanupFile(FileSystem fileSys, Path name) throws IOException { /** * Tests positional read in DFS. */ + @Test public void testPreadDFS() throws IOException { dfsPreadTest(false); //normal pread dfsPreadTest(true); //trigger read code path without transferTo. @@ -225,6 +227,7 @@ private void dfsPreadTest(boolean disableTransferTo) throws IOException { } } + @Test public void testPreadDFSSimulated() throws IOException { simulatedStorage = true; testPreadDFS(); @@ -234,6 +237,7 @@ public void testPreadDFSSimulated() throws IOException { /** * Tests positional read in LocalFS. 
*/ + @Test public void testPreadLocalFS() throws IOException { Configuration conf = new HdfsConfiguration(); FileSystem fileSys = FileSystem.getLocal(conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java index 19818a6633..600829b118 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java @@ -17,7 +17,10 @@ */ package org.apache.hadoop.hdfs; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.OutputStream; import java.security.PrivilegedExceptionAction; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java index 6189a96f81..ee5d50aa02 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java @@ -16,6 +16,7 @@ * limitations under the License. */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertTrue; import java.io.IOException; @@ -29,8 +30,9 @@ import org.apache.hadoop.hdfs.server.namenode.LeaseManager; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.log4j.Level; +import org.junit.Test; -public class TestRenameWhileOpen extends junit.framework.TestCase { +public class TestRenameWhileOpen { { ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL); ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL); @@ -47,6 +49,7 @@ private static void checkFullFile(FileSystem fs, Path p) throws IOException { * mkdir /user/dir3 * move /user/dir1 /user/dir3 */ + @Test public void testWhileOpenRenameParent() throws IOException { Configuration conf = new HdfsConfiguration(); final int MAX_IDLE_TIME = 2000; // 2s @@ -132,6 +135,7 @@ public void testWhileOpenRenameParent() throws IOException { * open /user/dir1/file1 /user/dir2/file2 * move /user/dir1 /user/dir3 */ + @Test public void testWhileOpenRenameParentToNonexistentDir() throws IOException { Configuration conf = new HdfsConfiguration(); final int MAX_IDLE_TIME = 2000; // 2s @@ -206,6 +210,7 @@ public void testWhileOpenRenameParentToNonexistentDir() throws IOException { * mkdir /user/dir2 * move /user/dir1/file1 /user/dir2/ */ + @Test public void testWhileOpenRenameToExistentDirectory() throws IOException { Configuration conf = new HdfsConfiguration(); final int MAX_IDLE_TIME = 2000; // 2s @@ -270,6 +275,7 @@ public void testWhileOpenRenameToExistentDirectory() throws IOException { * open /user/dir1/file1 * move /user/dir1/file1 /user/dir2/ */ + @Test public void testWhileOpenRenameToNonExistentDirectory() throws IOException { Configuration conf = new HdfsConfiguration(); final int MAX_IDLE_TIME = 2000; // 2s diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java index 5fee500ab3..617bf7d86d 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java @@ -28,7 +28,6 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; -import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java index 994da701bc..e819e023b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.IOException; import java.io.OutputStream; @@ -25,8 +28,6 @@ import java.util.Iterator; import java.util.Random; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -39,16 +40,17 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.util.Time; +import org.junit.Test; /** * This class tests the replication of a DFS file. */ -public class TestReplication extends TestCase { +public class TestReplication { private static final long seed = 0xDEADBEEFL; private static final int blockSize = 8192; private static final int fileSize = 16384; @@ -149,6 +151,7 @@ private void cleanupFile(FileSystem fileSys, Path name) throws IOException { /* * Test if Datanode reports bad blocks during replication request */ + @Test public void testBadBlockReportOnTransfer() throws Exception { Configuration conf = new HdfsConfiguration(); FileSystem fs = null; @@ -240,11 +243,13 @@ public void runReplication(boolean simulated) throws IOException { } + @Test public void testReplicationSimulatedStorag() throws IOException { runReplication(true); } + @Test public void testReplication() throws IOException { runReplication(false); } @@ -298,6 +303,7 @@ private void waitForBlockReplication(String filename, * two of the blocks and removes one of the replicas. Expected behavior is * that missing replica will be copied from one valid source. 
*/ + @Test public void testPendingReplicationRetry() throws IOException { MiniDFSCluster cluster = null; @@ -400,6 +406,7 @@ public void testPendingReplicationRetry() throws IOException { * Test if replication can detect mismatched length on-disk blocks * @throws Exception */ + @Test public void testReplicateLenMismatchedBlock() throws Exception { MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(2).build(); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRestartDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRestartDFS.java index 7e130c9852..f6345a3eda 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRestartDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRestartDFS.java @@ -18,17 +18,19 @@ package org.apache.hadoop.hdfs; -import junit.framework.TestCase; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.junit.Test; /** * A JUnit test for checking if restarting DFS preserves integrity. */ -public class TestRestartDFS extends TestCase { +public class TestRestartDFS { public void runTests(Configuration conf, boolean serviceTest) throws Exception { MiniDFSCluster cluster = null; DFSTestUtil files = new DFSTestUtil.Builder().setName("TestRestartDFS"). @@ -110,6 +112,7 @@ public void runTests(Configuration conf, boolean serviceTest) throws Exception { } } /** check if DFS remains in proper condition after a restart */ + @Test public void testRestartDFS() throws Exception { final Configuration conf = new HdfsConfiguration(); runTests(conf, false); @@ -118,6 +121,7 @@ public void testRestartDFS() throws Exception { /** check if DFS remains in proper condition after a restart * this rerun is with 2 ports enabled for RPC in the namenode */ + @Test public void testRestartDualPortDFS() throws Exception { final Configuration conf = new HdfsConfiguration(); runTests(conf, true); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java index 1c3ada5318..6eab01090c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java @@ -18,30 +18,33 @@ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + import java.io.IOException; import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; -import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ipc.RemoteException; import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.namenode.SafeModeException; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.test.GenericTestUtils; - -import static org.junit.Assert.*; -import org.junit.Before; import org.junit.After; +import org.junit.Before; import org.junit.Test; import com.google.common.base.Supplier; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java index a34e00a03c..c2f1cf39c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java @@ -17,24 +17,26 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.DataOutputStream; import java.io.IOException; import java.util.Random; -import junit.framework.TestCase; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ChecksumFileSystem; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IOUtils; +import org.junit.Test; /** * This class tests the presence of seek bug as described * in HADOOP-508 */ -public class TestSeekBug extends TestCase { +public class TestSeekBug { static final long seed = 0xDEADBEEFL; static final int ONEMB = 1 << 20; @@ -123,6 +125,7 @@ private void cleanupFile(FileSystem fileSys, Path name) throws IOException { /** * Test if the seek bug exists in FSDataInputStream in DFS. */ + @Test public void testSeekBugDFS() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); @@ -142,6 +145,7 @@ public void testSeekBugDFS() throws IOException { /** * Tests if the seek bug exists in FSDataInputStream in LocalFS. 
*/ + @Test public void testSeekBugLocalFS() throws IOException { Configuration conf = new HdfsConfiguration(); FileSystem fileSys = FileSystem.getLocal(conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java index f7ea537be1..18341ca955 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java @@ -17,27 +17,33 @@ */ package org.apache.hadoop.hdfs; -import junit.framework.TestCase; -import java.io.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.text.SimpleDateFormat; +import java.util.Date; import java.util.Random; -import java.net.*; + import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.util.Time; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileStatus; -import java.text.SimpleDateFormat; -import java.util.Date; +import org.junit.Test; /** * This class tests the access time on files. * */ -public class TestSetTimes extends TestCase { +public class TestSetTimes { static final long seed = 0xDEADBEEFL; static final int blockSize = 8192; static final int fileSize = 16384; @@ -78,6 +84,7 @@ private void printDatanodeReport(DatanodeInfo[] info) { /** * Tests mod & access time in DFS. */ + @Test public void testTimes() throws IOException { Configuration conf = new HdfsConfiguration(); final int MAX_IDLE_TIME = 2000; // 2s @@ -209,6 +216,7 @@ public void testTimes() throws IOException { /** * Tests mod time change at close in DFS. 
*/ + @Test public void testTimesAtClose() throws IOException { Configuration conf = new HdfsConfiguration(); final int MAX_IDLE_TIME = 2000; // 2s diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepDecreasing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepDecreasing.java index de6c8a47e4..fb81086377 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepDecreasing.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepDecreasing.java @@ -19,9 +19,10 @@ import java.io.IOException; -import junit.framework.TestCase; +import org.junit.Test; -public class TestSetrepDecreasing extends TestCase { +public class TestSetrepDecreasing { + @Test public void testSetrepDecreasing() throws IOException { TestSetrepIncreasing.setrep(5, 3, false); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java index 29c1aa221e..9824064c71 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java @@ -17,14 +17,21 @@ */ package org.apache.hadoop.hdfs; -import junit.framework.TestCase; -import java.io.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.*; +import org.apache.hadoop.fs.BlockLocation; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsShell; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; +import org.junit.Test; -public class TestSetrepIncreasing extends TestCase { +public class TestSetrepIncreasing { static void setrep(int fromREP, int toREP, boolean simulatedStorage) throws IOException { Configuration conf = new HdfsConfiguration(); if (simulatedStorage) { @@ -68,9 +75,11 @@ static void setrep(int fromREP, int toREP, boolean simulatedStorage) throws IOEx } } + @Test public void testSetrepIncreasing() throws IOException { setrep(3, 7, false); } + @Test public void testSetrepIncreasingSimulatedStorage() throws IOException { setrep(3, 7, true); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java index 60e8f0487c..8cbb4fd317 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java @@ -17,23 +17,27 @@ */ package org.apache.hadoop.hdfs; -import junit.framework.TestCase; -import java.io.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; import java.util.Random; + import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.BlockLocation; import 
org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; +import org.junit.Test; /** * This class tests the creation of files with block-size * smaller than the default buffer size of 4K. */ -public class TestSmallBlock extends TestCase { +public class TestSmallBlock { static final long seed = 0xDEADBEEFL; static final int blockSize = 1; static final int fileSize = 20; @@ -90,6 +94,7 @@ private void cleanupFile(FileSystem fileSys, Path name) throws IOException { /** * Tests small block size in DFS. */ + @Test public void testSmallBlock() throws IOException { Configuration conf = new HdfsConfiguration(); if (simulatedStorage) { @@ -108,6 +113,7 @@ public void testSmallBlock() throws IOException { cluster.shutdown(); } } + @Test public void testSmallBlockSimulatedStorage() throws IOException { simulatedStorage = true; testSmallBlock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java index 68c593b2e8..5503238330 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java @@ -17,12 +17,12 @@ */ package org.apache.hadoop.hdfs; +import java.io.OutputStream; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IOUtils; - -import java.io.OutputStream; import org.junit.Test; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java index 0b6bceafaf..7f792f43b5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java @@ -20,6 +20,9 @@ package org.apache.hadoop.hdfs; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; + import java.io.File; import java.io.FileInputStream; import java.io.IOException; @@ -28,24 +31,21 @@ import java.util.Arrays; import java.util.Collections; import java.util.zip.CRC32; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; - -import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; -import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; - import org.apache.hadoop.hdfs.server.common.Storage; -import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; +import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage; import org.apache.hadoop.hdfs.server.datanode.DataStorage; import org.apache.hadoop.hdfs.server.namenode.NNStorage; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java index 602f016dac..4c117a992e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hdfs.protocol; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java index d329f49376..89e8b0eb62 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java @@ -17,11 +17,13 @@ */ package org.apache.hadoop.hdfs.protocol; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + import java.util.EnumSet; -import static org.junit.Assert.*; -import org.junit.Test; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; +import org.junit.Test; /** * Test for {@link LayoutVersion} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java index f58c8636a1..c009c451b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hdfs.protocolPB; -import static junit.framework.Assert.*; +import static junit.framework.Assert.assertEquals; +import static junit.framework.Assert.assertTrue; import java.util.ArrayList; import java.util.Arrays; @@ -27,9 +28,9 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto; @@ -57,10 +58,10 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature; import org.apache.hadoop.hdfs.server.protocol.BlockCommand; +import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; import 
org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; -import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java index f4c1b7e328..5f74b7a20f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java @@ -20,7 +20,8 @@ -import static org.junit.Assert.*; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import java.io.ByteArrayInputStream; import java.io.DataInputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/SecurityTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/SecurityTestUtil.java index 4f08a7e5e3..cb55854f1b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/SecurityTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/SecurityTestUtil.java @@ -20,8 +20,6 @@ import java.io.IOException; -import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; -import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.security.token.Token; /** Utilities for security tests */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index e8a9d29dcb..5c9a569533 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hdfs.server.balancer; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + import java.io.IOException; import java.net.URI; import java.util.ArrayList; @@ -41,13 +44,11 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.util.Time; import org.junit.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; /** * This class tests if a balancer schedules tasks correctly. 
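The hunks in this patch all apply the same mechanical JUnit 3 to JUnit 4 conversion: drop "extends TestCase", mark each test method with @Test, replace the assert methods inherited from junit.framework.TestCase (and wildcard static imports of org.junit.Assert) with explicit static imports, and regroup imports as static imports first, then java.*, then third-party packages. The following is a minimal sketch of the converted shape; the class and method names are hypothetical, not taken from the patch:

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import org.junit.Test;

// A JUnit 4 test class is a plain class; it no longer extends TestCase.
public class ExampleMigratedTest {

  // JUnit 4 finds this method through the @Test annotation rather than
  // through the JUnit 3 "public void test*" naming convention.
  @Test
  public void testSum() {
    int sum = 2 + 2;
    // Static imports stand in for the assertEquals/assertTrue methods that
    // were previously inherited from junit.framework.TestCase.
    assertEquals(4, sum);
    assertTrue("sum should be positive", sum > 0);
  }
}

Spelling out each imported assert, instead of "import static org.junit.Assert.*;", keeps the origin of every assertion visible, which is why the patch also narrows the existing wildcard static imports.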
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java index 9d13a2b619..1a309910eb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java @@ -29,8 +29,8 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; -import org.apache.hadoop.hdfs.NameNodeProxies; import org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf; +import org.apache.hadoop.hdfs.NameNodeProxies; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java index b130e027b0..dfd0b947c1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java @@ -40,8 +40,8 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.LeaseManager; import org.apache.hadoop.hdfs.server.namenode.NameNode; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java index f1991345da..b8eb6855fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import org.junit.Test; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import java.util.ArrayList; import java.util.Iterator; @@ -29,7 +29,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.common.GenerationStamp; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; +import org.junit.Test; /** * This class provides tests for BlockInfo class, which is used in BlocksMap. 
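One pitfall this conversion has to guard against: JUnit 3 ran every "public void test*" method of a TestCase subclass, while JUnit 4 runs only methods annotated with @Test, so a class that merely dropped "extends TestCase" would still compile but silently execute zero tests. That is why each hunk that removes the superclass also annotates every test method. A converted class can be sanity-checked by running it programmatically with JUnitCore; ExampleMigratedTest refers to the hypothetical class sketched above:

import org.junit.runner.JUnitCore;
import org.junit.runner.Result;
import org.junit.runner.notification.Failure;

public class MigrationSmokeCheck {
  public static void main(String[] args) {
    // Runs every @Test-annotated method of the given class.
    Result result = JUnitCore.runClasses(ExampleMigratedTest.class);
    // A run count of zero is the classic symptom of a missing @Test.
    System.out.println("tests run: " + result.getRunCount());
    for (Failure failure : result.getFailures()) {
      System.out.println(failure.toString());
    }
    System.out.println("successful: " + result.wasSuccessful());
  }
}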
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java index 2d676578ff..b22383469f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java @@ -17,25 +17,25 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import junit.framework.TestCase; +import static org.junit.Assert.assertEquals; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; -import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.junit.Test; /** * Test if FSNamesystem schedules invalidate work correctly */ -public class TestComputeInvalidateWork extends TestCase { +public class TestComputeInvalidateWork { /** * Test if {@link FSNamesystem#computeInvalidateWork(int)} * can schedule invalidate work correctly */ + @Test public void testCompInvalidate() throws Exception { final Configuration conf = new HdfsConfiguration(); final int NUM_OF_DATANODES = 3; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java index ee6a26026c..0912ad9023 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java @@ -17,6 +17,11 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.Arrays; import java.util.HashMap; @@ -24,12 +29,11 @@ import java.util.List; import java.util.Map; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.protocol.Block; +import org.junit.Test; /** @@ -38,7 +42,7 @@ * CorruptReplicasMap::getCorruptReplicaBlockIds * return the correct values */ -public class TestCorruptReplicaInfo extends TestCase { +public class TestCorruptReplicaInfo { private static final Log LOG = LogFactory.getLog(TestCorruptReplicaInfo.class); @@ -60,6 +64,7 @@ private Block getBlock(int block_id) { return getBlock((long)block_id); } + @Test public void testCorruptReplicaInfo() throws IOException, InterruptedException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java index e43310cb43..33369720c1 100644 ---
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java @@ -17,21 +17,25 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.util.ArrayList; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.common.GenerationStamp; - -import junit.framework.TestCase; +import org.junit.Test; /** * This class tests the methods in DatanodeDescriptor */ -public class TestDatanodeDescriptor extends TestCase { +public class TestDatanodeDescriptor { /** * Test that getInvalidateBlocks observes the maxlimit. */ + @Test public void testGetInvalidateBlocks() throws Exception { final int MAX_BLOCKS = 10; final int REMAINING_BLOCKS = 2; @@ -49,6 +53,7 @@ public void testGetInvalidateBlocks() throws Exception { assertEquals(bc.length, REMAINING_BLOCKS); } + @Test public void testBlocksCounter() throws Exception { DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor(); assertEquals(0, dd.numBlocks()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java index 2d7a122c46..7448da776b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import java.util.ArrayList; +import static org.junit.Assert.assertEquals; -import junit.framework.TestCase; +import java.util.ArrayList; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -34,17 +34,19 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.junit.Test; /** * Test if FSNamesystem handles heartbeats correctly */ -public class TestHeartbeatHandling extends TestCase { +public class TestHeartbeatHandling { /** * Test if * {@link FSNamesystem#handleHeartbeat} * can pick up replication and/or invalidate requests and observes the max * limit */ + @Test public void testHeartbeat() throws Exception { final Configuration conf = new HdfsConfiguration(); final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java index 081438075c..151d035135 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java @@ -18,13 +18,15 @@ package org.apache.hadoop.hdfs.server.blockmanagement; -import org.apache.hadoop.hdfs.DFSTestUtil; +import static
org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import org.apache.hadoop.hdfs.DFSTestUtil; import org.junit.Before; import org.junit.Test; -import static org.junit.Assert.*; - public class TestHost2NodesMap { private Host2NodesMap map = new Host2NodesMap(); private DatanodeDescriptor dataNodes[]; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java index d4be55660c..80c8eb5008 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java @@ -17,12 +17,12 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; +import static org.junit.Assert.assertTrue; + import java.util.Collection; import java.util.Iterator; import java.util.concurrent.TimeoutException; -import junit.framework.TestCase; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -32,11 +32,9 @@ import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; -import org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager; -import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.util.Time; +import org.junit.Test; /** * Test if live nodes count per node is correct @@ -45,7 +43,7 @@ * Two of the "while" loops below use "busy wait" * because they are detecting transient states. 
*/ -public class TestNodeCount extends TestCase { +public class TestNodeCount { final short REPLICATION_FACTOR = (short)2; final long TIMEOUT = 20000L; long timeout = 0; @@ -53,6 +51,7 @@ public class TestNodeCount extends TestCase { Block lastBlock = null; NumberReplicas lastNum = null; + @Test public void testNodeCount() throws Exception { // start a mini dfs cluster of 2 nodes final Configuration conf = new HdfsConfiguration(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java index 68b3f3ec97..dc8578e13e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java @@ -18,12 +18,13 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import static org.apache.hadoop.util.Time.now; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.IOException; import java.util.Collection; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FSDataOutputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java index bc6c4dfb45..3c7ad8ca02 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java @@ -17,13 +17,13 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; import java.util.Queue; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.server.blockmanagement.PendingDataNodeMessages.ReportedBlockInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java index 0d138e05d1..dc390d2e59 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java @@ -17,17 +17,19 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import junit.framework.TestCase; -import java.lang.System; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import org.apache.hadoop.hdfs.protocol.Block; +import org.junit.Test; /** * This class tests the internals of PendingReplicationBlocks.java */ -public class TestPendingReplication extends TestCase { 
+public class TestPendingReplication { final static int TIMEOUT = 3; // 3 seconds + @Test public void testPendingReplication() { PendingReplicationBlocks pendingReplications; pendingReplications = new PendingReplicationBlocks(TIMEOUT * 1000); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java index eacd09b374..8c8674dbeb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.IOException; @@ -34,8 +37,6 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.junit.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; /** * Test when RBW block is removed. Invalidation of the corrupted block happens diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index aefd0befed..febbba5149 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -17,7 +17,9 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import java.io.File; import java.util.ArrayList; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java index 5af87adb5c..5fbd44d5e1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -26,7 +30,6 @@ import java.util.Map; import java.util.Set; -import junit.framework.TestCase; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileSystem; @@ -39,7 +42,7 @@ import org.apache.hadoop.net.Node; import org.junit.Test; -public class TestReplicationPolicyWithNodeGroup extends TestCase { +public class TestReplicationPolicyWithNodeGroup { private static final int BLOCK_SIZE = 1024; 
private static final int NUM_OF_DATANODES = 8; private static final Configuration CONF = new HdfsConfiguration(); @@ -104,6 +107,7 @@ private static void setupDataNodeCapacity() { * the 1st is on dataNodes[0] and the 2nd is on a different rack. * @throws Exception */ + @Test public void testChooseTarget1() throws Exception { dataNodes[0].updateHeartbeat( 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, @@ -164,6 +168,7 @@ private void verifyNoTwoTargetsOnSameNodeGroup(DatanodeDescriptor[] targets) { * node group, and the rest should be placed on a third rack. * @throws Exception */ + @Test public void testChooseTarget2() throws Exception { HashMap excludedNodes; DatanodeDescriptor[] targets; @@ -207,6 +212,7 @@ public void testChooseTarget2() throws Exception { * and the rest should be placed on the third rack. * @throws Exception */ + @Test public void testChooseTarget3() throws Exception { // make data node 0 to be not qualified to choose dataNodes[0].updateHeartbeat( @@ -259,6 +265,7 @@ public void testChooseTarget3() throws Exception { * in different node group. * @throws Exception */ + @Test public void testChooseTarget4() throws Exception { // make data node 0-2 to be not qualified to choose: not enough disk space for(int i=0; i<3; i++) { @@ -302,6 +309,7 @@ public void testChooseTarget4() throws Exception { * the 3rd replica should be placed on the same rack as the 2nd replica, * @throws Exception */ + @Test public void testChooseTarget5() throws Exception { setupDataNodeCapacity(); DatanodeDescriptor[] targets; @@ -333,6 +341,7 @@ public void testChooseTarget5() throws Exception { * the 1st replica. The 3rd replica can be placed randomly. * @throws Exception */ + @Test public void testRereplicate1() throws Exception { setupDataNodeCapacity(); List chosenNodes = new ArrayList(); @@ -369,6 +378,7 @@ public void testRereplicate1() throws Exception { * the rest replicas can be placed randomly, * @throws Exception */ + @Test public void testRereplicate2() throws Exception { setupDataNodeCapacity(); List chosenNodes = new ArrayList(); @@ -399,6 +409,7 @@ public void testRereplicate2() throws Exception { * the rest replicas can be placed randomly, * @throws Exception */ + @Test public void testRereplicate3() throws Exception { setupDataNodeCapacity(); List chosenNodes = new ArrayList(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java index 7f208a4e34..979ab888f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import junit.framework.TestCase; +import static org.junit.Assert.assertEquals; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -27,8 +27,10 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.junit.Test; -public class TestUnderReplicatedBlocks extends TestCase { +public class TestUnderReplicatedBlocks { + @Test public void testSetrepIncWithUnderReplicatedBlocks() throws Exception { Configuration conf = new HdfsConfiguration(); final 
short REPLICATION_FACTOR = 2; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestGetUriFromString.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestGetUriFromString.java index b1ffca8227..e4f9697f46 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestGetUriFromString.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestGetUriFromString.java @@ -17,19 +17,21 @@ */ package org.apache.hadoop.hdfs.server.common; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + import java.io.IOException; import java.net.URI; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.junit.Test; /** * This is a unit test, which tests {@link Util#stringAsURI(String)} * for Windows and Unix style file paths. */ -public class TestGetUriFromString extends TestCase { +public class TestGetUriFromString { private static final Log LOG = LogFactory.getLog(TestGetUriFromString.class); private static final String RELATIVE_FILE_PATH = "relativeFilePath"; @@ -49,6 +51,7 @@ public class TestGetUriFromString extends TestCase { * Test for a relative path, OS independent * @throws IOException */ + @Test public void testRelativePathAsURI() throws IOException { URI u = Util.stringAsURI(RELATIVE_FILE_PATH); LOG.info("Uri: " + u); @@ -59,6 +62,7 @@ public void testRelativePathAsURI() throws IOException { * Test for OS-dependent absolute paths. * @throws IOException */ + @Test public void testAbsolutePathAsURI() throws IOException { URI u = null; u = Util.stringAsURI(ABSOLUTE_PATH_WINDOWS); @@ -74,6 +78,7 @@ public void testAbsolutePathAsURI() throws IOException { * Test for a URI * @throws IOException */ + @Test public void testURI() throws IOException { LOG.info("Testing correct Unix URI: " + URI_UNIX); URI u = Util.stringAsURI(URI_UNIX); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java index b10d27e435..3b07fe7978 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.datanode; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; import java.io.IOException; import java.net.InetSocketAddress; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java index ee6a7b5590..b9f58baef3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hdfs.server.datanode; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.DataInputStream; import java.io.DataOutputStream; import
java.io.IOException; @@ -47,15 +51,10 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; -import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.util.Time; - import org.junit.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; /** * This class tests if block replacement requests to data nodes work correctly. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java index e2954855ea..b461e3a9f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java @@ -23,10 +23,10 @@ import javax.management.MBeanServer; import javax.management.ObjectName; -import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.conf.Configuration; -import org.junit.Test; +import org.apache.hadoop.hdfs.MiniDFSCluster; import org.junit.Assert; +import org.junit.Test; /** * Class for testing {@link DataNodeMXBean} implementation diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java index 35e83fa1b2..81748ba2cf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java @@ -20,7 +20,8 @@ import static org.apache.hadoop.test.MetricsAsserts.assertCounter; import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.util.List; import java.util.Random; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java index fe6e8b7973..4e4df657de 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java @@ -19,12 +19,13 @@ package org.apache.hadoop.hdfs.server.datanode; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.File; import java.io.IOException; -import junit.framework.Assert; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -74,7 +75,7 @@ public void testDeleteBlockPool() throws Exception { // Although namenode is shutdown, the bp offerservice is still running try {
dn1.deleteBlockPool(bpid1, true); - Assert.fail("Must not delete a running block pool"); + fail("Must not delete a running block pool"); } catch (IOException expected) { } @@ -85,7 +86,7 @@ public void testDeleteBlockPool() throws Exception { try { dn1.deleteBlockPool(bpid1, false); - Assert.fail("Must not delete if any block files exist unless " + fail("Must not delete if any block files exist unless " + "force is true"); } catch (IOException expected) { } @@ -115,7 +116,7 @@ public void testDeleteBlockPool() throws Exception { // on dn2 try { dn2.deleteBlockPool(bpid1, true); - Assert.fail("Must not delete a running block pool"); + fail("Must not delete a running block pool"); } catch (IOException expected) { } @@ -180,21 +181,21 @@ public void testDfsAdminDeleteBlockPool() throws Exception { Configuration nn1Conf = cluster.getConfiguration(0); nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1"); dn1.refreshNamenodes(nn1Conf); - Assert.assertEquals(1, dn1.getAllBpOs().length); + assertEquals(1, dn1.getAllBpOs().length); DFSAdmin admin = new DFSAdmin(nn1Conf); String dn1Address = dn1.getDatanodeId().getIpAddr() + ":" + dn1.getIpcPort(); String[] args = { "-deleteBlockPool", dn1Address, bpid2 }; int ret = admin.run(args); - Assert.assertFalse(0 == ret); + assertFalse(0 == ret); verifyBlockPoolDirectories(true, dn1StorageDir1, bpid2); verifyBlockPoolDirectories(true, dn1StorageDir2, bpid2); String[] forceArgs = { "-deleteBlockPool", dn1Address, bpid2, "force" }; ret = admin.run(forceArgs); - Assert.assertEquals(0, ret); + assertEquals(0, ret); verifyBlockPoolDirectories(false, dn1StorageDir1, bpid2); verifyBlockPoolDirectories(false, dn1StorageDir2, bpid2); @@ -216,7 +217,7 @@ private void verifyBlockPoolDirectories(boolean shouldExist, + bpid); if (shouldExist == false) { - Assert.assertFalse(bpDir.exists()); + assertFalse(bpDir.exists()); } else { File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); File finalizedDir = new File(bpCurrentDir, @@ -224,9 +225,9 @@ private void verifyBlockPoolDirectories(boolean shouldExist, File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW); File versionFile = new File(bpCurrentDir, "VERSION"); - Assert.assertTrue(finalizedDir.isDirectory()); - Assert.assertTrue(rbwDir.isDirectory()); - Assert.assertTrue(versionFile.exists()); + assertTrue(finalizedDir.isDirectory()); + assertTrue(rbwDir.isDirectory()); + assertTrue(versionFile.exists()); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java index ea0143eacd..045df0bbb8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java @@ -17,6 +17,12 @@ */ package org.apache.hadoop.hdfs.server.datanode; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.FileOutputStream; import java.io.IOException; @@ -25,8 +31,6 @@ import java.util.List; import java.util.Random; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import 
org.apache.hadoop.conf.Configuration; @@ -38,15 +42,16 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.common.GenerationStamp; -import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; +import org.junit.Test; /** * Tests {@link DirectoryScanner} handling of differences * between blocks on the disk and blocks in memory. */ -public class TestDirectoryScanner extends TestCase { +public class TestDirectoryScanner { private static final Log LOG = LogFactory.getLog(TestDirectoryScanner.class); private static final Configuration CONF = new HdfsConfiguration(); private static final int DEFAULT_GEN_STAMP = 9999; @@ -218,6 +223,7 @@ private void scan(long totalBlocks, int diffsize, long missingMetaFile, long mis assertEquals(mismatchBlocks, stats.mismatchBlocks); } + @Test public void testDirectoryScanner() throws Exception { // Run the test with and without parallel scanning for (int parallelism = 1; parallelism < 3; parallelism++) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java index cf2e4483de..b293075ddd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hdfs.server.datanode; -import static org.apache.hadoop.test.MetricsAsserts.*; +import static org.apache.hadoop.test.MetricsAsserts.assertCounter; +import static org.apache.hadoop.test.MetricsAsserts.getMetrics; import java.util.EnumSet; import java.util.Random; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java index 2d6f210379..b3719ad63b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java @@ -18,7 +18,8 @@ package org.apache.hadoop.hdfs.server.datanode; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.IOException; import java.net.InetSocketAddress; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java index a5c8551074..8ff6cb8886 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java @@ -17,7 +17,9 @@ */ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import static org.junit.Assert.*; +import static org.junit.Assert.assertNull; +import static
org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.IOException; import java.net.InetSocketAddress; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java index e35447391c..db1cbbc8cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; -import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReplicaMap; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java index b2cb080066..41a4fe4555 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java @@ -17,11 +17,11 @@ */ package org.apache.hadoop.hdfs.server.journalservice; +import static org.junit.Assert.assertNotNull; + import java.io.IOException; import java.net.InetSocketAddress; -import junit.framework.Assert; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystemTestHelper; @@ -122,6 +122,6 @@ public void verifyFence(JournalService s, NameNode nn) throws Exception { // New epoch higher than the current epoch is successful FenceResponse resp = s.fence(info, currentEpoch+1, "fencer"); - Assert.assertNotNull(resp); + assertNotNull(resp); } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java index 772053d2c3..c0ed7be829 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java @@ -17,6 +17,12 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; + import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; @@ -37,33 +43,29 @@ import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; -import 
org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile; import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile; +import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.util.Holder; import org.apache.hadoop.hdfs.util.MD5FileUtils; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.conf.Configuration; -import org.mockito.Mockito; import org.mockito.Matchers; +import org.mockito.Mockito; import com.google.common.base.Joiner; -import com.google.common.collect.Lists; import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Sets; import com.google.common.io.Files; -import static org.junit.Assert.*; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.mock; - /** * Utility functions for testing fsimage storage. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java index a3f84533cf..fbe48bd2e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java @@ -28,6 +28,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Options.Rename; @@ -46,7 +47,6 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; /** * OfflineEditsViewerHelper is a helper class for TestOfflineEditsViewer, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java index d7bb0f7cbc..59b6cc21c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java @@ -18,9 +18,10 @@ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.*; -import org.junit.Before; -import org.junit.After; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.BufferedReader; import java.io.File; @@ -44,6 +45,8 @@ import org.apache.log4j.Logger; import org.apache.log4j.PatternLayout; import org.apache.log4j.RollingFileAppender; +import org.junit.After; +import org.junit.Before; import org.junit.Test; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java 
index 4118be2048..13f871934e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java @@ -17,7 +17,10 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; import java.io.File; import java.io.IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java index 20d4c720de..8536a2cb66 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java @@ -17,9 +17,11 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.*; -import junit.framework.Assert; -import java.io.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -120,7 +122,7 @@ public void testSaveNamespace() throws IOException { renewToken(token1); renewToken(token2); } catch (IOException e) { - Assert.fail("Could not renew or cancel the token"); + fail("Could not renew or cancel the token"); } namesystem = cluster.getNamesystem(); @@ -148,7 +150,7 @@ public void testSaveNamespace() throws IOException { renewToken(token5); } catch (IOException e) { - Assert.fail("Could not renew or cancel the token"); + fail("Could not renew or cancel the token"); } // restart cluster again @@ -171,7 +173,7 @@ public void testSaveNamespace() throws IOException { renewToken(token5); cancelToken(token5); } catch (IOException e) { - Assert.fail("Could not renew or cancel the token"); + fail("Could not renew or cancel the token"); } } finally { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java index af285cc3eb..b2496f92b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java @@ -18,24 +18,26 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI; -import junit.framework.TestCase; +import static org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.assertNNHasCheckpoints; +import static org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.getNameNodeCurrentDirs; -import java.lang.management.ManagementFactory; -import java.net.InetSocketAddress; import java.io.File; import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.net.InetSocketAddress; import java.net.URI; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Random; +import junit.framework.TestCase; + 
import org.apache.commons.cli.ParseException; -import org.apache.commons.logging.impl.Log4JLogger; -import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataOutputStream; @@ -44,6 +46,7 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -78,9 +81,6 @@ import com.google.common.collect.Lists; import com.google.common.primitives.Ints; -import static org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.assertNNHasCheckpoints; -import static org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.getNameNodeCurrentDirs; - /** * This class tests the creation and validation of a checkpoint. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java index 445b744778..4330317d6f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java @@ -19,8 +19,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.ByteArrayInputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java index dacd03bbfc..d399ddf856 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.junit.Assert.assertTrue; + import java.net.URL; import java.util.Collection; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java index 05739a8155..d78198ab40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java @@ -17,11 +17,12 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + import java.io.IOException; import java.util.concurrent.TimeoutException; -import junit.framework.Assert; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import 
org.apache.hadoop.conf.Configuration; @@ -121,7 +122,7 @@ public void testDeadDatanode() throws Exception { // Ensure blockReceived call from dead datanode is rejected with IOException try { dnp.blockReceivedAndDeleted(reg, poolId, storageBlocks); - Assert.fail("Expected IOException is not thrown"); + fail("Expected IOException is not thrown"); } catch (IOException ex) { // Expected } @@ -132,7 +133,7 @@ public void testDeadDatanode() throws Exception { new long[] { 0L, 0L, 0L }) }; try { dnp.blockReport(reg, poolId, report); - Assert.fail("Expected IOException is not thrown"); + fail("Expected IOException is not thrown"); } catch (IOException ex) { // Expected } @@ -142,8 +143,8 @@ public void testDeadDatanode() throws Exception { StorageReport[] rep = { new StorageReport(reg.getStorageID(), false, 0, 0, 0, 0) }; DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0, 0, 0).getCommands(); - Assert.assertEquals(1, cmd.length); - Assert.assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER + assertEquals(1, cmd.length); + assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER .getAction()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java index 5ce5b18a00..e7abc1132d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java @@ -17,20 +17,35 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import junit.framework.TestCase; -import java.io.*; +import static org.apache.hadoop.test.MetricsAsserts.assertCounter; +import static org.apache.hadoop.test.MetricsAsserts.getMetrics; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.BufferedInputStream; +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.File; +import java.io.FilenameFilter; +import java.io.IOException; +import java.io.InputStream; +import java.io.PrintWriter; +import java.io.RandomAccessFile; +import java.io.StringWriter; import java.net.URI; +import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.Iterator; import java.util.LinkedList; import java.util.List; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Arrays; +import java.util.Random; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import java.util.Random; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -41,17 +56,15 @@ import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.*; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; - +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; -import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream; import 
org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; -import org.apache.hadoop.hdfs.server.namenode.NNStorage; import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.test.GenericTestUtils; @@ -59,19 +72,16 @@ import org.apache.hadoop.util.Time; import org.apache.log4j.Level; import org.aspectj.util.FileUtil; - -import org.mockito.Mockito; import org.junit.Test; +import org.mockito.Mockito; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; -import static org.apache.hadoop.test.MetricsAsserts.*; - /** * This class tests the creation and validation of a checkpoint. */ -public class TestEditLog extends TestCase { +public class TestEditLog { static { ((Log4JLogger)FSEditLog.LOG).getLogger().setLevel(Level.ALL); @@ -163,6 +173,7 @@ private static FSEditLog getFSEditLog(NNStorage storage) throws IOException { /** * Test case for an empty edit log from a prior version of Hadoop. */ + @Test public void testPreTxIdEditLogNoEdits() throws Exception { FSNamesystem namesys = Mockito.mock(FSNamesystem.class); namesys.dir = Mockito.mock(FSDirectory.class); @@ -176,6 +187,7 @@ public void testPreTxIdEditLogNoEdits() throws Exception { * Test case for loading a very simple edit log from a format * prior to the inclusion of edit transaction IDs in the log. */ + @Test public void testPreTxidEditLogWithEdits() throws Exception { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; @@ -204,6 +216,7 @@ private long testLoad(byte[] data, FSNamesystem namesys) throws IOException { /** * Simple test for writing to and rolling the edit log. */ + @Test public void testSimpleEditLog() throws IOException { // start a cluster Configuration conf = new HdfsConfiguration(); @@ -248,6 +261,7 @@ public void testSimpleEditLog() throws IOException { /** * Tests transaction logging in dfs. */ + @Test public void testMultiThreadedEditLog() throws IOException { testEditLog(2048); // force edit buffer to automatically sync on each log of edit log entry @@ -397,6 +411,7 @@ public Void call() throws Exception { }).get(); } + @Test public void testSyncBatching() throws Exception { // start a cluster Configuration conf = new HdfsConfiguration(); @@ -459,6 +474,7 @@ public void testSyncBatching() throws Exception { * This sequence is legal and can occur if enterSafeMode() is closely * followed by saveNamespace. */ + @Test public void testBatchedSyncWithClosedLogs() throws Exception { // start a cluster Configuration conf = new HdfsConfiguration(); @@ -498,6 +514,7 @@ public void testBatchedSyncWithClosedLogs() throws Exception { } } + @Test public void testEditChecksum() throws Exception { // start a cluster Configuration conf = new HdfsConfiguration(); @@ -549,6 +566,7 @@ public void testEditChecksum() throws Exception { * Test what happens if the NN crashes when it has started but * had no transactions written.
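The dominant change in this patch is the JUnit 3 to JUnit 4 migration visible in the TestEditLog hunks above: once a class stops extending junit.framework.TestCase, the runner no longer discovers public testXxx() methods by naming convention, so every test method needs an explicit @Test annotation or it is silently skipped. A minimal sketch of the before/after shape, using a hypothetical class name rather than anything from the Hadoop tree:

    // Before (JUnit 3): tests are found by the testXxx naming convention
    // inherited from junit.framework.TestCase.
    //
    //   public class TestExample extends junit.framework.TestCase {
    //     public void testSomething() throws Exception { ... }
    //   }
    //
    // After (JUnit 4): no superclass; an unannotated test method never runs.
    import static org.junit.Assert.assertEquals;

    import org.junit.Test;

    public class TestExample {
      @Test
      public void testSomething() throws Exception {
        assertEquals(4, 2 + 2); // assertions come from static imports, not a superclass
      }
    }

This is why the patch pairs each removed "extends TestCase" with a block of "+ @Test" insertions on the existing test methods.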
*/ + @Test public void testCrashRecoveryNoTransactions() throws Exception { testCrashRecovery(0); } @@ -557,6 +575,7 @@ public void testCrashRecoveryNoTransactions() throws Exception { * Test what happens if the NN crashes when it has started and * had a few transactions written */ + @Test public void testCrashRecoveryWithTransactions() throws Exception { testCrashRecovery(150); } @@ -666,22 +685,26 @@ private void testCrashRecovery(int numTransactions) throws Exception { } // should succeed - only one corrupt log dir + @Test public void testCrashRecoveryEmptyLogOneDir() throws Exception { doTestCrashRecoveryEmptyLog(false, true, true); } // should fail - seen_txid updated to 3, but no log dir contains txid 3 + @Test public void testCrashRecoveryEmptyLogBothDirs() throws Exception { doTestCrashRecoveryEmptyLog(true, true, false); } // should succeed - only one corrupt log dir + @Test public void testCrashRecoveryEmptyLogOneDirNoUpdateSeenTxId() throws Exception { doTestCrashRecoveryEmptyLog(false, false, true); } // should succeed - both log dirs corrupt, but seen_txid never updated + @Test public void testCrashRecoveryEmptyLogBothDirsNoUpdateSeenTxId() throws Exception { doTestCrashRecoveryEmptyLog(true, false, true); @@ -829,6 +852,7 @@ public boolean isInProgress() { } } + @Test public void testFailedOpen() throws Exception { File logDir = new File(TEST_DIR, "testFailedOpen"); logDir.mkdirs(); @@ -850,6 +874,7 @@ public void testFailedOpen() throws Exception { * Regression test for HDFS-1112/HDFS-3020. Ensures that, even if * logSync isn't called periodically, the edit log will sync itself. */ + @Test public void testAutoSync() throws Exception { File logDir = new File(TEST_DIR, "testAutoSync"); logDir.mkdirs(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java index 4572cee84f..51c9864406 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; import java.io.IOException; import java.io.OutputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java index 819614993d..17aacaac88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java @@ -17,41 +17,41 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import org.apache.commons.logging.Log; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.spy; -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.impl.Log4JLogger; - -import java.io.*; +import java.io.File; +import java.io.IOException; import java.util.ArrayList; import java.util.List; import
java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.permission.*; - +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; -import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.util.Time; import org.apache.log4j.Level; - -import static org.junit.Assert.*; import org.junit.Test; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import static org.mockito.Mockito.*; - /** * This class tests various synchronization bugs in FSEditLog rolling * and namespace saving. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java index 5e828b65e1..9feeada276 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java @@ -17,7 +17,10 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java index 267e128413..af5b76d4c2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java @@ -21,6 +21,8 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.spy; import java.io.BufferedInputStream; import java.io.File; @@ -30,7 +32,6 @@ import java.io.RandomAccessFile; import java.nio.channels.FileChannel; import java.util.Map; -import java.util.Set; import java.util.SortedMap; import org.apache.commons.logging.impl.Log4JLogger; @@ -44,20 +45,14 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation; -import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp; 
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.io.IOUtils; import org.apache.log4j.Level; import org.junit.Test; import com.google.common.collect.Maps; -import com.google.common.collect.Sets; import com.google.common.io.Files; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.spy; - public class TestFSEditLogLoader { static { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java index 500c5c3c69..01d54b814d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java @@ -17,19 +17,21 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.*; +import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName; +import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName; +import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; import java.io.File; import java.io.IOException; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; -import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; -import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName; -import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName; -import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName; - import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile; +import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.junit.Test; public class TestFSImageStorageInspector { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java index de3a89c083..f0c5c688f4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs.server.namenode; -import static org.apache.hadoop.hdfs.DFSConfigKeys.*; -import static org.junit.Assert.*; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY; +import static org.junit.Assert.assertEquals; import java.io.IOException; import java.net.URI; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java index e462530d5b..1a968c7d1d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java @@ -17,34 +17,36 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.*; +import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_FAIL; +import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_ROLL; +import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.setupEdits; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; -import java.net.URI; -import java.util.Collections; -import java.util.List; -import java.util.Iterator; -import java.util.PriorityQueue; - -import java.io.RandomAccessFile; import java.io.File; import java.io.FilenameFilter; import java.io.IOException; -import org.junit.Test; +import java.io.RandomAccessFile; +import java.net.URI; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.PriorityQueue; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.JournalManager.CorruptionException; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; +import org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; -import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.setupEdits; -import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec; -import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_ROLL; -import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_FAIL; +import org.junit.Test; -import com.google.common.collect.ImmutableList; import com.google.common.base.Joiner; +import com.google.common.collect.ImmutableList; public class TestFileJournalManager { static final Log LOG = LogFactory.getLog(TestFileJournalManager.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java index 43e1c157b5..7fd6f47ad5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java @@ -17,27 +17,28 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.Random; -import junit.framework.TestCase; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; -import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.junit.Test; /** * This class tests that a file system adheres to the limit of * maximum number of files that is configured. 
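A second recurring change, visible in the TestFileLimit hunk here and in most files below: qualified calls through the deprecated junit.framework.Assert are replaced by unqualified calls backed by explicit static imports from org.junit.Assert, and wildcard forms such as "import static org.junit.Assert.*;" are expanded to one import per assertion actually used. The reshuffled import blocks throughout the patch also settle on a single grouping: static imports first, then java.* and javax.*, then third-party packages (org.*, com.*), alphabetized within each group. A sketch of the resulting style, hypothetical class name only:

    import static org.junit.Assert.assertTrue;
    import static org.junit.Assert.fail;

    import java.io.File;

    import org.junit.Test;

    public class TestAssertStyle { // hypothetical, for illustration
      @Test
      public void testTempDirIsUsable() {
        File dir = new File(System.getProperty("java.io.tmpdir"));
        assertTrue("temp dir should exist", dir.exists());
        if (!dir.canWrite()) {
          fail("temp dir is not writable: " + dir);
        }
      }
    }

Explicit imports keep unused-import warnings meaningful, which a wildcard import defeats.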
*/ -public class TestFileLimit extends TestCase { +public class TestFileLimit { static final long seed = 0xDEADBEEFL; static final int blockSize = 8192; boolean simulatedStorage = false; @@ -75,6 +76,7 @@ private void waitForLimit(FSNamesystem namesys, long num) /** * Test that file data becomes available before file is closed. */ + @Test public void testFileLimit() throws IOException { Configuration conf = new HdfsConfiguration(); int maxObjects = 5; @@ -166,6 +168,7 @@ public void testFileLimit() throws IOException { } } + @Test public void testFileLimitSimulated() throws IOException { simulatedStorage = true; testFileLimit(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java index cef0a0db87..fd79ad151f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI; import static org.junit.Assert.assertEquals; import static org.mockito.Matchers.anyObject; import static org.mockito.Mockito.mock; @@ -30,12 +31,11 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.FSLimitException; import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException; import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index aec35a7914..b79904518e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -18,7 +18,11 @@ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; import java.io.BufferedReader; import java.io.ByteArrayOutputStream; @@ -57,7 +61,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; -import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java 
index 735001d68c..2b67136931 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java @@ -17,18 +17,20 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import org.junit.Test; - +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; -import static org.junit.Assert.*; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import java.io.IOException; import java.net.URI; import java.util.Collection; -import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.junit.Test; public class TestGenericJournalConf { private static final String DUMMY_URI = "dummy://test"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java index 62fac1998f..d040278c5a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.*; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import java.io.IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java index e87cad63b0..0882d18386 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java @@ -25,10 +25,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.util.Time; import org.junit.Assert; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java index 3443fa814f..f124879b1a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.junit.Assert.assertTrue; + 
import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; @@ -25,9 +27,6 @@ import java.util.Collection; import java.util.Random; -import org.junit.Test; -import static org.junit.Assert.assertTrue; - import org.apache.commons.logging.Log; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -35,13 +34,14 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.hdfs.BlockMissingException; import org.apache.hadoop.hdfs.CorruptFileBlockIterator; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.util.StringUtils; +import org.junit.Test; /** * This class tests the listCorruptFileBlocks API. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java index 9ec5d95ba6..2f2b6888fc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java @@ -17,27 +17,26 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import org.junit.Test; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import java.io.BufferedReader; -import java.io.FileInputStream; -import java.io.DataInputStream; -import java.io.InputStreamReader; -import java.io.IOException; -import java.lang.InterruptedException; -import java.util.Random; import static org.junit.Assert.assertTrue; + +import java.io.BufferedReader; +import java.io.DataInputStream; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.util.Random; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; - +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; /** * This class tests the creation and validation of metasave diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java index e7a9cc1d49..d25676891a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java @@ -17,6 +17,11 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName; +import static 
org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName; +import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName; +import static org.apache.hadoop.test.GenericTestUtils.assertGlobEquals; + import java.io.File; import java.io.IOException; @@ -31,11 +36,6 @@ import com.google.common.base.Joiner; -import static org.apache.hadoop.test.GenericTestUtils.assertGlobEquals; -import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName; -import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName; -import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName; - /** * Functional tests for NNStorageRetentionManager. This differs from diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java index 4c6334f53a..df70499888 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName; +import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName; +import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName; + import java.io.IOException; import java.util.List; import java.util.Map; @@ -25,13 +29,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; -import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile; import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile; +import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; -import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName; -import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName; -import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName; - import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger; import org.junit.Assert; import org.junit.Before; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java index 89716910d9..8d635f63bc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java @@ -17,7 +17,10 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java index fcbc489017..ab013b5fbf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.File; import java.lang.management.ManagementFactory; @@ -28,17 +29,15 @@ import javax.management.MBeanServer; import javax.management.ObjectName; +import junit.framework.Assert; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.util.VersionInfo; - import org.junit.Test; import org.mortbay.util.ajax.JSON; -import junit.framework.Assert; - /** * Class for testing {@link NameNodeMXBean} implementation */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java index df1456b203..1717bb0412 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java @@ -18,16 +18,18 @@ package org.apache.hadoop.hdfs.server.namenode; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.spy; + import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; import java.util.HashSet; import java.util.Set; -import static org.junit.Assert.*; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.spy; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -37,10 +39,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; -import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache; -import org.apache.hadoop.hdfs.server.namenode.FSImage; -import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.StringUtils; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java index 53eb88402d..e73d71aff7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.io.IOException; import java.net.URISyntaxException; @@ -34,10 +38,6 @@ import org.junit.Test; import org.mockito.Mockito; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - public class TestNameNodeResourceChecker { private Configuration conf; private File baseDir; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java index 49a96e9b66..6e0657c8d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.*; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java index 239ff78123..2883f99f69 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java @@ -18,12 +18,12 @@ package org.apache.hadoop.hdfs.server.namenode; +import static org.junit.Assert.assertTrue; + import java.io.File; import java.util.ArrayList; import java.util.List; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -34,13 +34,14 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; +import org.junit.Test; /** * This tests InterDataNodeProtocol for block handling. */ -public class TestNamenodeCapacityReport extends TestCase { +public class TestNamenodeCapacityReport { private static final Log LOG = LogFactory.getLog(TestNamenodeCapacityReport.class); /** @@ -48,6 +49,7 @@ public class TestNamenodeCapacityReport extends TestCase { * It verifies the block information from a datanode. * Then, it updates the block with new information and verifies again. 
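The hunks that swap Assert.fail(...) for a statically imported fail(...) (TestDeadDatanode at the top of this section, TestSecureNameNode and TestValidateConfigurationSettings further down) all preserve the same expected-exception idiom: call the operation, fail() if it returns normally, and treat the catch block as the passing path. JUnit 4 also offers @Test(expected = ...), but the explicit form kept here lets the test assert on the caught exception. A self-contained sketch, hypothetical names throughout:

    import static org.junit.Assert.assertTrue;
    import static org.junit.Assert.fail;

    import java.io.IOException;

    import org.junit.Test;

    public class TestExpectedException { // hypothetical, for illustration
      @Test
      public void testRejectsBadInput() {
        try {
          parse("not-a-number");
          fail("Expected IOException is not thrown");
        } catch (IOException expected) {
          // Expected: reaching this block is the passing path, and the
          // exception can still be inspected, unlike with @Test(expected = ...).
          assertTrue(expected.getMessage().contains("bad input"));
        }
      }

      private int parse(String s) throws IOException {
        try {
          return Integer.parseInt(s);
        } catch (NumberFormatException e) {
          throw new IOException("bad input: " + s, e);
        }
      }
    }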
*/ + @Test public void testVolumeSize() throws Exception { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java index 4b8409535c..f9ba34e15f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java @@ -18,32 +18,37 @@ package org.apache.hadoop.hdfs.server.namenode; -import junit.framework.TestCase; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.util.Collections; +import java.util.List; + +import junit.framework.AssertionFailedError; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; - -import java.util.Collections; -import java.util.List; - -import java.io.File; +import org.junit.Test; /** * A JUnit test for checking if restarting DFS preserves integrity. 
* Specifically with FSImage being written in parallel */ -public class TestParallelImageWrite extends TestCase { +public class TestParallelImageWrite { private static final int NUM_DATANODES = 4; /** check if DFS remains in proper condition after a restart */ + @Test public void testRestartDFS() throws Exception { final Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java index 506630fcb3..9a712ef28b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java @@ -17,15 +17,13 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import org.apache.hadoop.hdfs.server.namenode.INode; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.fs.Path; - -import org.junit.Test; +import static org.junit.Assert.assertTrue; import java.util.Arrays; -import static org.junit.Assert.*; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSUtil; +import org.junit.Test; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNode.java index 440574e14e..30ea90a415 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNode.java @@ -17,10 +17,13 @@ package org.apache.hadoop.hdfs.server.namenode; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; + import java.io.IOException; import java.security.PrivilegedExceptionAction; -import junit.framework.Assert; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FileSystem; @@ -79,14 +82,14 @@ public FileSystem run() throws Exception { try { Path p = new Path("/users"); fs.mkdirs(p); - Assert.fail("user1 must not be allowed to write in /"); + fail("user1 must not be allowed to write in /"); } catch (IOException expected) { } Path p = new Path("/tmp/alpha"); fs.mkdirs(p); - Assert.assertNotNull(fs.listStatus(p)); - Assert.assertEquals(AuthenticationMethod.KERBEROS, + assertNotNull(fs.listStatus(p)); + assertEquals(AuthenticationMethod.KERBEROS, ugi.getAuthenticationMethod()); } finally { if (cluster != null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java index 22857c902e..e3056e9b0b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java @@ -17,28 +17,30 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import junit.framework.TestCase; -import java.io.*; +import static org.junit.Assert.assertEquals; + +import java.io.File; +import 
java.io.IOException; import java.net.URI; import java.util.Iterator; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; - import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; -import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; +import org.junit.Test; /** * This class tests the creation and validation of a checkpoint. */ -public class TestSecurityTokenEditLog extends TestCase { +public class TestSecurityTokenEditLog { static final int NUM_DATA_NODES = 1; // This test creates NUM_THREADS threads and each thread does @@ -85,6 +87,7 @@ public void run() { /** * Tests transaction logging in dfs. */ + @Test public void testEditLog() throws IOException { // start a cluster diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java index 717dc74627..90fa4d475f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java @@ -19,6 +19,10 @@ import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption.IMPORT; import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.File; import java.io.IOException; @@ -29,8 +33,6 @@ import java.util.List; import java.util.Random; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -41,7 +43,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; - import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -56,13 +57,15 @@ import org.apache.hadoop.hdfs.util.MD5FileUtils; import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.util.StringUtils; -import org.junit.Assert; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; /** * Startup and checkpoint tests * */ -public class TestStartup extends TestCase { +public class TestStartup { public static final String NAME_NODE_HOST = "localhost:"; public static final String WILDCARD_HTTP_HOST = "0.0.0.0:"; private static final Log LOG = @@ -88,8 +91,8 @@ private void writeFile(FileSystem fileSys, Path name, int repl) } - @Override - protected void setUp() throws Exception { + @Before + public void setUp() throws Exception { config = new HdfsConfiguration(); hdfsDir = new File(MiniDFSCluster.getBaseDirectory()); @@ -115,7 +118,7 @@ protected void setUp() throws Exception { /** * clean up */ - @Override + @After public void tearDown() throws Exception { if ( hdfsDir.exists() && 
!FileUtil.fullyDelete(hdfsDir) ) { throw new IOException("Could not delete hdfs directory in tearDown '" + hdfsDir + "'"); @@ -258,6 +261,7 @@ private void verifyDifferentDirs(FSImage img, long expectedImgSize, long expecte * checkpoint for edits and image is the same directory * @throws IOException */ + @Test public void testChkpointStartup2() throws IOException{ LOG.info("--starting checkpointStartup2 - same directory for checkpoint"); // different name dirs @@ -283,6 +287,7 @@ public void testChkpointStartup2() throws IOException{ * checkpoint for edits and image are different directories * @throws IOException */ + @Test public void testChkpointStartup1() throws IOException{ //setUpConfig(); LOG.info("--starting testStartup Recovery"); @@ -307,6 +312,7 @@ public void testChkpointStartup1() throws IOException{ * secondary node copies fsimage and edits into correct separate directories. * @throws IOException */ + @Test public void testSNNStartup() throws IOException{ //setUpConfig(); LOG.info("--starting SecondNN startup test"); @@ -370,6 +376,7 @@ public void testSNNStartup() throws IOException{ } } + @Test public void testCompression() throws IOException { LOG.info("Test compressing image."); Configuration conf = new Configuration(); @@ -426,6 +433,7 @@ private void checkNameSpace(Configuration conf) throws IOException { namenode.join(); } + @Test public void testImageChecksum() throws Exception { LOG.info("Test uncompressed image checksum"); testImageChecksum(false); @@ -493,6 +501,7 @@ private void testImageChecksum(boolean compress) throws Exception { * restarts, the still alive datanodes should not have any trouble in getting * registrant again. */ + @Test public void testNNRestart() throws IOException, InterruptedException { MiniDFSCluster cluster = null; FileSystem localFileSys; @@ -527,7 +536,7 @@ public void testNNRestart() throws IOException, InterruptedException { cluster.restartNameNode(); NamenodeProtocols nn = cluster.getNameNodeRpc(); assertNotNull(nn); - Assert.assertTrue(cluster.isDataNodeUp()); + assertTrue(cluster.isDataNodeUp()); DatanodeInfo[] info = nn.getDatanodeReport(DatanodeReportType.LIVE); for (int i = 0 ; i < 5 && info.length != numDatanodes; i++) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java index 1a40159bc9..2fb784d46f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java @@ -17,9 +17,11 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.net.URI; import java.util.Collections; -import junit.framework.Assert; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -67,7 +69,7 @@ public void tearDown() throws Exception { public void testStartupOptUpgradeFrom204() throws Exception { layoutVersion = Feature.RESERVED_REL20_204.getLayoutVersion(); storage.processStartupOptionsForUpgrade(startOpt, layoutVersion); - Assert.assertTrue("Clusterid should start with CID", storage.getClusterID() + assertTrue("Clusterid should start with CID", storage.getClusterID() .startsWith("CID")); } @@ -83,7 +85,7 @@ public void 
testStartupOptUpgradeFrom22WithCID() throws Exception { startOpt.setClusterId("cid"); layoutVersion = Feature.RESERVED_REL22.getLayoutVersion(); storage.processStartupOptionsForUpgrade(startOpt, layoutVersion); - Assert.assertEquals("Clusterid should match with the given clusterid", + assertEquals("Clusterid should match with the given clusterid", "cid", storage.getClusterID()); } @@ -101,7 +103,7 @@ public void testStartupOptUpgradeFromFederation() storage.setClusterID("currentcid"); layoutVersion = Feature.FEDERATION.getLayoutVersion(); storage.processStartupOptionsForUpgrade(startOpt, layoutVersion); - Assert.assertEquals("Clusterid should match with the existing one", + assertEquals("Clusterid should match with the existing one", "currentcid", storage.getClusterID()); } @@ -119,7 +121,7 @@ public void testStartupOptUpgradeFromFederationWithWrongCID() storage.setClusterID("currentcid"); layoutVersion = Feature.FEDERATION.getLayoutVersion(); storage.processStartupOptionsForUpgrade(startOpt, layoutVersion); - Assert.assertEquals("Clusterid should match with the existing one", + assertEquals("Clusterid should match with the existing one", "currentcid", storage.getClusterID()); } @@ -137,7 +139,7 @@ public void testStartupOptUpgradeFromFederationWithCID() storage.setClusterID("currentcid"); layoutVersion = Feature.FEDERATION.getLayoutVersion(); storage.processStartupOptionsForUpgrade(startOpt, layoutVersion); - Assert.assertEquals("Clusterid should match with the existing one", + assertEquals("Clusterid should match with the existing one", "currentcid", storage.getClusterID()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java index 8834e9170e..02c8816be9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java @@ -18,9 +18,14 @@ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName; +import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName; +import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.spy; import java.io.File; import java.io.IOException; @@ -28,9 +33,6 @@ import java.util.Iterator; import java.util.Set; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.spy; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.cli.CLITestCmdDFS; @@ -45,11 +47,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream; - -import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName; -import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName; -import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName; - import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java index 2ac39151bb..ae3f1a9459 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.*; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.File; import java.io.IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java index 37e2967965..4e9efd5a57 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java @@ -17,19 +17,21 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.*; -import org.junit.Test; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + import java.io.File; import java.io.IOException; -import junit.framework.Assert; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.test.GenericTestUtils; +import org.junit.Test; /** * This class tests the validation of the configuration object when passed @@ -53,7 +55,7 @@ public void testThatMatchingRPCandHttpPortsThrowException() DFSTestUtil.formatNameNode(conf); try { NameNode nameNode = new NameNode(conf); - Assert.fail("Should have throw the exception since the ports match"); + fail("Should have thrown the exception since the ports match"); } catch (IOException e) { // verify we're getting the right IOException assertTrue(e.toString().contains("dfs.namenode.rpc-address (")); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java index 4f93f4f43b..2dc3d1d616 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java @@ -17,11 +17,13 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; -import java.io.ByteArrayInputStream; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + import java.io.File; import java.io.IOException; import java.net.URI; -import java.util.Collections; import java.util.List; import java.util.Set; @@ -36,20 +38,16 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage; import
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-import com.google.common.base.Suppliers;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
 
-import static org.junit.Assert.*;
-
 public class TestBootstrapStandby {
   private static final Log LOG = LogFactory.getLog(TestBootstrapStandby.class);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java
index ccc46a204b..4f213b2405 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java
@@ -17,18 +17,20 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
 import java.net.URI;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
index 52e136940d..98ec33e5d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.util.concurrent.TimeoutException;
 
@@ -26,11 +27,11 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.ha.ClientBaseWithFixes;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.HealthMonitor;
+import org.apache.hadoop.ha.TestNodeFencer.AlwaysSucceedFencer;
 import org.apache.hadoop.ha.ZKFCTestUtil;
 import org.apache.hadoop.ha.ZKFailoverController;
-import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
-import org.apache.hadoop.ha.TestNodeFencer.AlwaysSucceedFencer;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
@@ -44,7 +45,6 @@
 import org.junit.Before;
 import org.junit.Test;
 
-
 import com.google.common.base.Supplier;
 
 public class TestDFSZKFailoverController extends ClientBaseWithFixes {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
index 2a144b88c9..96a890e222 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
@@ -41,12 +41,12 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
index f79ab109ce..cd090cbe6c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
@@ -17,7 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
@@ -32,6 +37,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.AbstractFileSystem;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -56,7 +62,6 @@
 import org.junit.Test;
 
 import com.google.common.base.Joiner;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 
 /**
  * Test case for client support of delegation tokens in an HA cluster.
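Many of the hunks in this patch repeat the same assertion cleanup: the JUnit 3 junit.framework.Assert class and wildcard imports such as "import static org.junit.Assert.*;" are replaced with one explicit static import per assertion the test actually uses (alongside a general alphabetizing of the import blocks), so call sites lose the "Assert." prefix and the import list records exactly which assertions each test depends on. A minimal before/after sketch of the idiom follows; the class name and values here are hypothetical and not part of this patch:

// Before: JUnit 3 helper class plus a wildcard static import.
//   import junit.framework.Assert;
//   import static org.junit.Assert.*;
//   ...
//   Assert.assertEquals(0, errCode);
// After: explicit JUnit 4 static imports and unprefixed call sites.
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import org.junit.Test;

public class ExampleAssertStyleTest { // hypothetical class, for illustration only
  @Test
  public void testErrorCode() {
    int errCode = 0; // stand-in for a real command result
    assertEquals(0, errCode); // was: Assert.assertEquals(0, errCode)
    assertTrue("error code should be zero", errCode == 0);
  }
}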
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
index bc5c487a76..8675fa3fc6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
@@ -22,8 +22,6 @@
 import java.io.File;
 import java.io.IOException;
 import java.net.URI;
-import java.util.List;
-import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java
index 794a3b6bf3..dd5c1bab75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 
 import java.io.File;
 import java.io.FileOutputStream;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureOfSharedDir.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureOfSharedDir.java
index ed5b8e76e2..9506a2f399 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureOfSharedDir.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureOfSharedDir.java
@@ -19,7 +19,10 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java
index abd7c72f17..ab3f3ca4f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java
@@ -17,8 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
-import static org.junit.Assert.*;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.net.URI;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java
index 10218f218e..49d89592b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java
@@ -17,13 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
 
-import junit.framework.Assert;
-
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
@@ -96,7 +95,7 @@ static void runFsck(Configuration conf) throws Exception {
         new String[]{"/", "-files"});
     String result = bStream.toString();
     System.out.println("output from fsck:\n" + result);
-    Assert.assertEquals(0, errCode);
+    assertEquals(0, errCode);
     assertTrue(result.contains("/test1"));
     assertTrue(result.contains("/test2"));
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
index c4abfd53f4..b147f4fd1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.List;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java
index e44ebc9b4d..83bd7bf13b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java
@@ -37,13 +37,12 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAWebUI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAWebUI.java
index be01430117..da6ec3d872 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAWebUI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAWebUI.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.net.URL;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
index 7bc49f7d9d..72110b29a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
@@ -17,6 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.io.File;
 import java.io.IOException;
 import java.net.URISyntaxException;
@@ -42,8 +48,6 @@
 import org.junit.Before;
 import org.junit.Test;
 
-import static org.junit.Assert.*;
-
 public class TestInitializeSharedEdits {
   private static final Log LOG = LogFactory.getLog(TestInitializeSharedEdits.class);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java
index e5b53ba3cf..ab2a8dd061 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java
@@ -22,8 +22,6 @@
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
-import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
 import org.apache.hadoop.ha.HealthCheckFailedException;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestQuotasWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestQuotasWithHA.java
index 5800d3a351..6ceecc79bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestQuotasWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestQuotasWithHA.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
 
 import java.io.IOException;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
index 3fa89105a5..61016c9540 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
index 45b3b02997..c029a673ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
@@ -17,25 +17,28 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.metrics;
 
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+
 import java.io.IOException;
 import java.util.Random;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import static org.apache.hadoop.test.MetricsAsserts.*;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
 
 /**
  * Test case for FilesInGetListingOps metric in Namenode
  */
-public class TestNNMetricFilesInGetListingOps extends TestCase {
+public class TestNNMetricFilesInGetListingOps {
   private static final Configuration CONF = new HdfsConfiguration();
   private static final String NN_METRICS = "NameNodeActivity";
   static {
@@ -49,16 +52,16 @@ public class TestNNMetricFilesInGetListingOps extends TestCase {
   private DistributedFileSystem fs;
   private Random rand = new Random();
 
-  @Override
-  protected void setUp() throws Exception {
+  @Before
+  public void setUp() throws Exception {
    cluster = new MiniDFSCluster.Builder(CONF).build();
    cluster.waitActive();
    cluster.getNameNode();
    fs = (DistributedFileSystem) cluster.getFileSystem();
  }
 
-  @Override
-  protected void tearDown() throws Exception {
+  @After
+  public void tearDown() throws Exception {
     cluster.shutdown();
   }
 
@@ -69,6 +72,7 @@ private void createFile(String fileName, long fileLen, short replicas) throws IO
   }
 
+  @Test
   public void testFilesInGetListingOps() throws Exception {
     createFile("/tmp1/t1", 3200, (short)3);
     createFile("/tmp1/t2", 3200, (short)3);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 41ba2ce1bc..d29bd4ecb5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -20,7 +20,7 @@
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertTrue;
 
 import java.io.DataInputStream;
 import java.io.IOException;
@@ -37,8 +37,8 @@
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
index 71dcce49e2..61e8ebef5c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
@@ -18,29 +18,30 @@
 package org.apache.hadoop.hdfs.tools;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.PrintStream;
 
-import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
-import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.ha.HAServiceStatus;
 import org.apache.hadoop.ha.HAServiceTarget;
 import org.apache.hadoop.ha.HealthCheckFailedException;
 import org.apache.hadoop.ha.ZKFCProtocol;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.test.MockitoUtil;
-
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
index a2f4791a8c..38380b55f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
@@ -17,26 +17,26 @@
  */
 package org.apache.hadoop.hdfs.tools;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
 
-import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.HAAdmin;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.ha.HAAdmin;
-import org.apache.hadoop.ha.NodeFencer;
 import org.apache.log4j.Level;
-
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
index d55a2583b9..652979eab4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
@@ -17,6 +17,16 @@
  */
 package org.apache.hadoop.hdfs.tools;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.PrintStream;
@@ -27,15 +37,9 @@
 import java.util.Map;
 import java.util.StringTokenizer;
 
-import static org.junit.Assert.*;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
-
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.tools.GetConf;
 import org.apache.hadoop.hdfs.tools.GetConf.Command;
 import org.apache.hadoop.hdfs.tools.GetConf.CommandHandler;
 import org.apache.hadoop.net.NetUtils;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
index a6746a249b..aabd5f685e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
@@ -18,29 +18,25 @@
 package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
 
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.File;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-import java.util.Map;
-import java.util.HashMap;
-
-import org.junit.Test;
-import org.junit.Before;
-
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
-import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer;
-import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer.Flags;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
 import org.apache.hadoop.hdfs.server.namenode.OfflineEditsViewerHelper;
+import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer.Flags;
+import org.junit.Before;
+import org.junit.Test;
 
 public class TestOfflineEditsViewer {
   private static final Log LOG = LogFactory.getLog(TestOfflineEditsViewer.class);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java
index 5417c5387b..a5501d9754 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java
@@ -17,20 +17,23 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
+
 import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement;
+import org.junit.Test;
 
 /**
  * Test that the DelimitedImageVisistor gives the expected output based
  * on predetermined inputs
  */
-public class TestDelimitedImageVisitor extends TestCase {
+public class TestDelimitedImageVisitor {
   private static String ROOT = System.getProperty("test.build.data","/tmp");
   private static final String delim = "--";
@@ -44,6 +47,7 @@ private void build(DelimitedImageVisitor div, ImageElement elem, String val,
     sb.append(delim);
   }
 
+  @Test
   public void testDelimitedImageVisistor() {
     String filename = ROOT + "/testDIV";
     File f = new File(filename);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOIVCanReadOldVersions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOIVCanReadOldVersions.java
index 4cfed3c3bb..dcdc622563 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOIVCanReadOldVersions.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOIVCanReadOldVersions.java
@@ -17,16 +17,18 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
 import java.io.IOException;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
 
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.SpotCheckImageVisitor.ImageInfo;
+import org.junit.Test;
 
-import junit.framework.TestCase;
-
-public class TestOIVCanReadOldVersions extends TestCase {
+public class TestOIVCanReadOldVersions {
   // Location of fsimage files during testing.
   public static final String TEST_CACHE_DATA_DIR =
     System.getProperty("test.cache.data", "build/test/cache");
@@ -35,6 +37,7 @@ public class TestOIVCanReadOldVersions {
   // layout versions. These fsimages were previously generated and stored
   // with the test. Test success indicates that no changes have been made
   // to the OIV that causes older fsimages to be incorrectly processed.
+  @Test
   public void testOldFSImages() {
     // Define the expected values from the prior versions, as they were created
     // and verified at time of creation
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index 2a5b1b671d..4f4cecd500 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -17,11 +17,12 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
-import java.io.BufferedReader;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.security.token.Token;
+import java.io.BufferedReader;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.EOFException;
@@ -37,20 +38,23 @@
 import java.util.List;
 import java.util.Set;
 
-import org.junit.*;
-import static org.junit.Assert.*;
-
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.security.token.Token;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
index 0180732af0..9549356a7b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.hdfs.util;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.FileNotFoundException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCyclicIteration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCyclicIteration.java
index 60451eb1ff..2aba515550 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCyclicIteration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCyclicIteration.java
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs.util;
+import static org.junit.Assert.assertEquals;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -24,7 +25,10 @@
 import java.util.NavigableMap;
 import java.util.TreeMap;
 
-public class TestCyclicIteration extends junit.framework.TestCase {
+import org.junit.Test;
+
+public class TestCyclicIteration {
+  @Test
   public void testCyclicIteration() throws Exception {
     for(int n = 0; n < 5; n++) {
       checkCyclicIteration(n);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java
index 8d2edf3e2e..8d9a3f9bbf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java
@@ -17,7 +17,9 @@
  */
 package org.apache.hadoop.hdfs.util;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertSame;
 
 import java.nio.ByteBuffer;
 import java.util.List;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestExactSizeInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestExactSizeInputStream.java
index c64427f77e..02787be69f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestExactSizeInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestExactSizeInputStream.java
@@ -17,14 +17,15 @@
  */
 package org.apache.hadoop.hdfs.util;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
 
 import java.io.ByteArrayInputStream;
 import java.io.EOFException;
 import java.io.IOException;
 import java.io.InputStream;
 
-import org.apache.hadoop.hdfs.util.ExactSizeInputStream;
 import org.junit.Test;
 
 public class TestExactSizeInputStream {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java
index 9465e46971..d8d9c7379e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java
@@ -17,20 +17,24 @@
  */
 package org.apache.hadoop.hdfs.util;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Random;
 
-import org.junit.Test;
-import org.junit.Before;
-import static org.junit.Assert.*;
-
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.util.Time;
+import org.junit.Before;
+import org.junit.Test;
 
 public class TestLightWeightHashSet{
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java
index 5182f164b8..1ccbccf53f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java
@@ -17,19 +17,21 @@
  */
 package org.apache.hadoop.hdfs.util;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Random;
 
-import org.junit.Test;
-import org.junit.Before;
-import static org.junit.Assert.*;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.util.Time;
+import org.junit.Before;
+import org.junit.Test;
 
 public class TestLightWeightLinkedSet {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java
index a43774ab33..6f5b161336 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs.util;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.FileWriter;
@@ -24,13 +28,10 @@
 
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.junit.Before;
 import org.junit.Test;
 
-import static org.junit.Assert.*;
-
 public class TestMD5FileUtils {
   private static final File TEST_DIR_ROOT = new File(
       System.getProperty("test.build.data","build/test/data"));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
index 015ce4dde2..aef467a0ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
@@ -18,6 +18,11 @@
 package org.apache.hadoop.hdfs.web;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.mockito.Mockito.mock;
+
 import java.io.IOException;
 import java.net.URI;
 import java.net.URL;
 
@@ -30,9 +35,9 @@
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.web.resources.DelegationParam;
-import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
 import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
+import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.SecurityUtilTestHelper;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -41,11 +46,6 @@
 import org.junit.Assert;
 import org.junit.Test;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.mockito.Mockito.mock;
-
 public class TestWebHdfsUrl {
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
index a18af908fc..2c4721b620 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
@@ -18,17 +18,18 @@
 package org.apache.hadoop.net;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-
-import org.junit.Test;
 import org.junit.Before;
-
-import static org.junit.Assert.*;
+import org.junit.Test;
 
 public class TestNetworkTopology {
   private final static NetworkTopology cluster = new NetworkTopology();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopologyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopologyWithNodeGroup.java
index 7dbd33ab95..a3b63c7430 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopologyWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopologyWithNodeGroup.java
@@ -17,15 +17,18 @@
  */
 package org.apache.hadoop.net;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.util.HashMap;
 import java.util.Map;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.junit.Test;
 
-public class TestNetworkTopologyWithNodeGroup extends TestCase {
+public class TestNetworkTopologyWithNodeGroup {
   private final static NetworkTopologyWithNodeGroup cluster = new NetworkTopologyWithNodeGroup();
@@ -48,14 +51,17 @@ public class TestNetworkTopologyWithNodeGroup extends TestCase {
   }
 
+  @Test
   public void testNumOfChildren() throws Exception {
     assertEquals(cluster.getNumOfLeaves(), dataNodes.length);
   }
 
+  @Test
   public void testNumOfRacks() throws Exception {
     assertEquals(cluster.getNumOfRacks(), 3);
   }
 
+  @Test
   public void testRacks() throws Exception {
     assertEquals(cluster.getNumOfRacks(), 3);
 
     assertTrue(cluster.isOnSameRack(dataNodes[0], dataNodes[1]));
@@ -67,6 +73,7 @@ public void testRacks() throws Exception {
     assertTrue(cluster.isOnSameRack(dataNodes[6], dataNodes[7]));
   }
 
+  @Test
   public void testNodeGroups() throws Exception {
     assertEquals(cluster.getNumOfRacks(), 3);
     assertTrue(cluster.isOnSameNodeGroup(dataNodes[0], dataNodes[1]));
@@ -78,6 +85,7 @@ public void testNodeGroups() throws Exception {
     assertFalse(cluster.isOnSameNodeGroup(dataNodes[6], dataNodes[7]));
   }
 
+  @Test
   public void testGetDistance() throws Exception {
     assertEquals(cluster.getDistance(dataNodes[0], dataNodes[0]), 0);
     assertEquals(cluster.getDistance(dataNodes[0], dataNodes[1]), 2);
@@ -86,6 +94,7 @@ public void testGetDistance() throws Exception {
     assertEquals(cluster.getDistance(dataNodes[0], dataNodes[6]), 8);
   }
 
+  @Test
   public void testPseudoSortByDistance() throws Exception {
     DatanodeDescriptor[] testNodes = new DatanodeDescriptor[4];
 
@@ -152,6 +161,7 @@ private Map pickNodesAtRandom(int numNodes,
   /**
    * This test checks that chooseRandom works for an excluded node.
    */
+  @Test
   public void testChooseRandomExcludedNode() {
     String scope = "~" + NodeBase.getPath(dataNodes[0]);
     Map frequency = pickNodesAtRandom(100, scope);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
index 843f422448..6de0c6952f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
@@ -17,24 +17,32 @@
  */
 package org.apache.hadoop.security;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.fs.*;
-import org.apache.hadoop.fs.permission.*;
 import org.apache.hadoop.util.StringUtils;
-
-import junit.framework.TestCase;
+import org.junit.Test;
 
 /** Unit tests for permission */
-public class TestPermission extends TestCase {
+public class TestPermission {
   public static final Log LOG = LogFactory.getLog(TestPermission.class);
 
   final private static Path ROOT_PATH = new Path("/data");
@@ -65,6 +73,7 @@ static FsPermission checkPermission(FileSystem fs,
    * either set with old param dfs.umask that takes decimal umasks
    * or dfs.umaskmode that takes symbolic or octal umask.
    */
+  @Test
   public void testBackwardCompatibility() {
     // Test 1 - old configuration key with decimal
     // umask value should be handled when set using
@@ -93,6 +102,7 @@ public void testBackwardCompatibility() {
     assertEquals(18, FsPermission.getUMask(conf).toShort());
   }
 
+  @Test
   public void testCreate() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
@@ -155,6 +165,7 @@ public void testCreate() throws Exception {
     }
   }
 
+  @Test
   public void testFilePermision() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
index bd6e524f7c..7029f42926 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
@@ -34,8 +34,8 @@
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.mortbay.util.ajax.JSON;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
index c66d0e48b0..6901f6439e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
@@ -18,12 +18,14 @@
 package org.apache.hadoop.tools;
 
+import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
+
 import java.io.File;
 import java.io.IOException;
 import java.util.Random;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -33,14 +35,16 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.tools.JMXGet;
-import static org.apache.hadoop.test.MetricsAsserts.*;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
 
 /**
  * Startup and checkpoint tests
  *
  */
-public class TestJMXGet extends TestCase {
+public class TestJMXGet {
 
   private Configuration config;
   private MiniDFSCluster cluster;
@@ -62,15 +66,15 @@ private void writeFile(FileSystem fileSys, Path name, int repl)
   }
 
-  @Override
-  protected void setUp() throws Exception {
+  @Before
+  public void setUp() throws Exception {
     config = new HdfsConfiguration();
   }
 
   /**
    * clean up
    */
-  @Override
+  @After
   public void tearDown() throws Exception {
     if(cluster.isClusterUp()) cluster.shutdown();
@@ -86,6 +90,7 @@ public void tearDown() throws Exception {
    * test JMX connection to NameNode..
    * @throws Exception
    */
+  @Test
   public void testNameNode() throws Exception {
     int numDatanodes = 2;
     cluster = new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build();
@@ -114,6 +119,7 @@ public void testNameNode() throws Exception {
    * test JMX connection to DataNode..
    * @throws Exception
    */
+  @Test
   public void testDataNode() throws Exception {
     int numDatanodes = 2;
     cluster = new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build();
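The last group of files in this patch (TestNNMetricFilesInGetListingOps, TestCyclicIteration, TestNetworkTopologyWithNodeGroup, TestPermission, TestJMXGet) also drops "extends TestCase", completing the JUnit 3 to JUnit 4 migration: the superclass goes away, the "@Override protected setUp()/tearDown()" pair becomes public methods annotated @Before and @After, and every test method gains @Test, because JUnit 4 discovers tests by annotation rather than by the "test" name prefix or by inheritance. A condensed sketch of that migration on a hypothetical class, not taken from this patch:

import static org.junit.Assert.assertEquals;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

// JUnit 3 shape: "public class ExampleMigratedTest extends TestCase" with
// protected setUp()/tearDown() overrides and tests found by naming convention.
public class ExampleMigratedTest { // hypothetical, for illustration only
  private StringBuilder fixture;

  @Before // was: @Override protected void setUp()
  public void setUp() {
    fixture = new StringBuilder("ab");
  }

  @After // was: @Override protected void tearDown()
  public void tearDown() {
    fixture = null;
  }

  @Test // was: discovered via the test* name prefix
  public void testLength() {
    assertEquals(2, fixture.length());
  }
}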