diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 92f0c41c01..6ad9085444 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -66,16 +66,9 @@ public final class OzoneConfigKeys {
       "dfs.container.ratis.ipc.random.port";
   public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT =
       false;
-
-  public static final String OZONE_LOCALSTORAGE_ROOT =
-      "ozone.localstorage.root";
-  public static final String OZONE_LOCALSTORAGE_ROOT_DEFAULT = "/tmp/ozone";
   public static final String OZONE_ENABLED =
       "ozone.enabled";
   public static final boolean OZONE_ENABLED_DEFAULT = false;
-  public static final String OZONE_HANDLER_TYPE_KEY =
-      "ozone.handler.type";
-  public static final String OZONE_HANDLER_TYPE_DEFAULT = "distributed";
   public static final String OZONE_TRACE_ENABLED_KEY =
       "ozone.trace.enabled";
   public static final boolean OZONE_TRACE_ENABLED_DEFAULT = false;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 320a3edd4e..ab6df920d9 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -114,7 +114,6 @@ public final class OzoneConsts {
    * Ozone handler types.
    */
   public static final String OZONE_HANDLER_DISTRIBUTED = "distributed";
-  public static final String OZONE_HANDLER_LOCAL = "local";
 
   public static final String DELETING_KEY_PREFIX = "#deleting#";
   public static final String DELETED_KEY_PREFIX = "#deleted#";
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index f2544d9f06..c3e3095e2e 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -277,17 +277,6 @@
       Please note: By default ozone is disabled on a hadoop cluster.
     </description>
   </property>
-  <property>
-    <name>ozone.handler.type</name>
-    <value>distributed</value>
-    <tag>OZONE, REST</tag>
-    <description>
-      Tells ozone which storage handler to use. The possible values are:
-      distributed - The Ozone distributed storage handler, which speaks to
-      OM/SCM on the backend and provides REST services to clients.
-      local - Local Storage handler strictly for testing - To be removed.
-    </description>
-  </property>
   <property>
     <name>ozone.key.deleting.limit.per.task</name>
     <value>1000</value>
@@ -416,16 +405,6 @@
       Default user permissions used in OM.
     </description>
   </property>
-  <property>
-    <name>ozone.localstorage.root</name>
-    <value>${hadoop.tmp.dir}/ozone</value>
-    <tag>OZONE, DEBUG</tag>
-    <description>
-      This is used only for testing purposes. This value is used by the local
-      storage handler to simulate a REST backend. This is useful only when
-      debugging the REST front end independent of OM and SCM. To be removed.
-    </description>
-  </property>
   <property>
     <name>ozone.metadata.dirs</name>
     <value/>
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
index 7a9a09a1e1..79d1bef6ea 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
@@ -50,10 +50,9 @@ public interface RatisTestHelper {
     private final MiniOzoneCluster cluster;
 
     /**
-     * Create a {@link MiniOzoneCluster} for testing by setting
-     * OZONE_ENABLED = true,
-     * RATIS_ENABLED = true, and
-     * OZONE_HANDLER_TYPE_KEY = "distributed".
+     * Create a {@link MiniOzoneCluster} for testing by setting.
+     * OZONE_ENABLED = true
+     * RATIS_ENABLED = true
      */
     public RatisTestSuite(final Class<?> clazz)
         throws IOException, TimeoutException, InterruptedException {
@@ -88,7 +87,6 @@ public interface RatisTestHelper {
   static OzoneConfiguration newOzoneConfiguration(
       Class<?> clazz, RpcType rpc) {
     final OzoneConfiguration conf = new OzoneConfiguration();
-    ContainerTestHelper.setOzoneLocalStorageRoot(clazz, conf);
     initRatisConf(rpc, conf);
     return conf;
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
index 0dc039983b..73bb32d7bc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
@@ -22,8 +22,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.BucketArgs;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -65,16 +63,13 @@ public class TestOzoneRestClient {
   /**
    * Create a MiniDFSCluster for testing.
    * <p>
- * Ozone is made active by setting OZONE_ENABLED = true and - * OZONE_HANDLER_TYPE_KEY = "distributed" + * Ozone is made active by setting OZONE_ENABLED = true * * @throws IOException */ @BeforeClass public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, - OzoneConsts.OZONE_HANDLER_DISTRIBUTED); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build(); cluster.waitForClusterToBeReady(); InetSocketAddress omHttpAddress = cluster.getOzoneManager() diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java index e5ecd81409..43bd23eda1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java @@ -81,8 +81,6 @@ public class TestCloseContainerHandlingByClient { @BeforeClass public static void init() throws Exception { conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, - OzoneConsts.OZONE_HANDLER_DISTRIBUTED); chunkSize = (int)OzoneConsts.MB; blockSize = 4 * chunkSize; conf.setInt(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY, chunkSize); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java index e31b528c77..45b3843577 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java @@ -24,9 +24,7 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -81,16 +79,13 @@ public class TestOzoneRpcClient { /** * Create a MiniOzoneCluster for testing. *

- * Ozone is made active by setting OZONE_ENABLED = true and - * OZONE_HANDLER_TYPE_KEY = "distributed" + * Ozone is made active by setting OZONE_ENABLED = true * * @throws IOException */ @BeforeClass public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, - OzoneConsts.OZONE_HANDLER_DISTRIBUTED); conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE, 1); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10).build(); cluster.waitForClusterToBeReady(); @@ -439,7 +434,7 @@ public class TestOzoneRpcClient { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - String value = RandomStringUtils.random(RandomUtils.nextInt(0,1024)); + String value = RandomStringUtils.random(RandomUtils.nextInt(0, 1024)); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); volume.createBucket(bucketName); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java index dc166b5d0d..0eb1ad4d9d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java @@ -38,7 +38,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; @@ -73,15 +72,6 @@ public final class ContainerTestHelper { private ContainerTestHelper() { } - public static void setOzoneLocalStorageRoot( - Class clazz, OzoneConfiguration conf) { - String path = GenericTestUtils.getTempPath(clazz.getSimpleName()); - path += conf.getTrimmed( - OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, - OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT); - conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path); - } - // TODO: mock multi-node pipeline /** * Create a pipeline with single node replica. 
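[Editorial illustration, not part of the patch: the hunk above deletes ContainerTestHelper.setOzoneLocalStorageRoot, and the test hunks before and after it drop conf.set(OZONE_HANDLER_TYPE_KEY, ...), so after this change a test boots Ozone with nothing but the MiniOzoneCluster builder. A minimal Java sketch of the post-change setup follows; the class name ExampleMiniOzoneTest, the main() harness, and the datanode count are made up for the example, while the builder calls mirror the init() methods shown in the surrounding hunks.]

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;

/** Sketch: booting Ozone in a test after this change (hypothetical class). */
public class ExampleMiniOzoneTest {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Note what is absent: no ozone.handler.type and no
    // ozone.localstorage.root; both keys are removed by this patch.
    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(3)   // arbitrary; the tests here use 1 to 10
        .build();
    cluster.waitForClusterToBeReady();
    // ... exercise volumes, buckets, and keys against the cluster ...
    cluster.shutdown();
  }
}

[In the actual tests this setup and teardown live in @BeforeClass/@AfterClass methods, as the hunks in this patch show.]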
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java index 0d46ecfbbe..6478fc670b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java @@ -28,7 +28,6 @@ import java.util.Random; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.container.ContainerTestHelper; @@ -45,17 +44,14 @@ import org.junit.Test; */ public class TestContainerDeletionChoosingPolicy { private static String path; - private static ContainerSet containerSet; - private static OzoneConfiguration conf; + private ContainerSet containerSet; + private OzoneConfiguration conf; @Before public void init() throws Throwable { conf = new OzoneConfiguration(); path = GenericTestUtils .getTempPath(TestContainerDeletionChoosingPolicy.class.getSimpleName()); - path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, - OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT); - conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path); } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java index 8e11a977b7..e49e283224 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java @@ -21,21 +21,20 @@ import com.google.common.collect.Maps; import org.apache.commons.codec.binary.Hex; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdfs.server.datanode.StorageLocation; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdfs.server.datanode.StorageLocation; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.ContainerTestHelper; +import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; +import org.apache.hadoop.ozone.container.common.helpers.KeyData; import org.apache.hadoop.ozone.container.common.interfaces.Container; import 
org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume - .RoundRobinVolumeChoosingPolicy; +import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; @@ -45,8 +44,6 @@ import org.apache.hadoop.ozone.container.keyvalue.impl.KeyManagerImpl; import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; import org.apache.hadoop.ozone.container.keyvalue.interfaces.KeyManager; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.helpers.KeyData; import org.apache.hadoop.utils.MetadataStore; import org.junit.After; import org.junit.AfterClass; @@ -68,113 +65,80 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; -import java.util.ArrayList; import java.util.UUID; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Stage.COMBINED; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ROOT_PREFIX; import static org.apache.hadoop.ozone.container.ContainerTestHelper.getChunk; import static org.apache.hadoop.ozone.container.ContainerTestHelper.getData; -import static org.apache.hadoop.ozone.container.ContainerTestHelper - .setDataChecksum; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Stage.COMBINED; +import static org.apache.hadoop.ozone.container.ContainerTestHelper.setDataChecksum; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; /** - * Simple tests to verify that container persistence works as expected. - * Some of these tests are specific to {@link KeyValueContainer}. If a new - * {@link ContainerProtos.ContainerType} is added, the tests need to be - * modified. + * Simple tests to verify that container persistence works as expected. Some of + * these tests are specific to {@link KeyValueContainer}. If a new {@link + * ContainerProtos.ContainerType} is added, the tests need to be modified. */ public class TestContainerPersistence { - @Rule - public ExpectedException exception = ExpectedException.none(); - - /** - * Set the timeout for every test. 
- */ - @Rule - public Timeout testTimeout = new Timeout(300000); - + private static final String DATANODE_UUID = UUID.randomUUID().toString(); + private static final String SCM_ID = UUID.randomUUID().toString(); private static Logger log = LoggerFactory.getLogger(TestContainerPersistence.class); private static String hddsPath; - private static String path; private static OzoneConfiguration conf; - private static List pathLists = new LinkedList<>(); - private Long containerID = 8888L;; - private static final String datanodeUuid = UUID.randomUUID().toString(); - private static final String scmId = UUID.randomUUID().toString(); - private static ContainerSet containerSet; private static VolumeSet volumeSet; private static VolumeChoosingPolicy volumeChoosingPolicy; private static KeyManager keyManager; private static ChunkManager chunkManager; + @Rule + public ExpectedException exception = ExpectedException.none(); + /** + * Set the timeout for every test. + */ + @Rule + public Timeout testTimeout = new Timeout(300000); + private Long containerID = 8888L; @BeforeClass public static void init() throws Throwable { conf = new OzoneConfiguration(); hddsPath = GenericTestUtils .getTempPath(TestContainerPersistence.class.getSimpleName()); - path = hddsPath + conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, - OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT); - conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, hddsPath); - - File containerDir = new File(path); - if (containerDir.exists()) { - FileUtils.deleteDirectory(new File(path)); - } - Assert.assertTrue(containerDir.mkdirs()); volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy(); } @AfterClass public static void shutdown() throws IOException { - FileUtils.deleteDirectory(new File(path)); FileUtils.deleteDirectory(new File(hddsPath)); } @Before public void setupPaths() throws IOException { - if (!new File(path).exists() && !new File(path).mkdirs()) { - throw new IOException("Unable to create paths. " + path); - } - StorageLocation loc = StorageLocation.parse( - Paths.get(path).resolve(CONTAINER_ROOT_PREFIX).toString()); - - pathLists.clear(); containerSet = new ContainerSet(); - volumeSet = new VolumeSet(datanodeUuid, conf); + volumeSet = new VolumeSet(DATANODE_UUID, conf); keyManager = new KeyManagerImpl(conf); chunkManager = new ChunkManagerImpl(); - if (!new File(loc.getNormalizedUri()).mkdirs()) { - throw new IOException("unable to create paths. 
" + - loc.getNormalizedUri()); - } - pathLists.add(loc); - for (String dir : conf.getStrings(DFS_DATANODE_DATA_DIR_KEY)) { StorageLocation location = StorageLocation.parse(dir); FileUtils.forceMkdir(new File(location.getNormalizedUri())); } - } + } @After public void cleanupDir() throws IOException { // Clean up SCM metadata - log.info("Deleting {}", path); - FileUtils.deleteDirectory(new File(path)); log.info("Deleting {}", hddsPath); FileUtils.deleteDirectory(new File(hddsPath)); @@ -196,9 +160,9 @@ public class TestContainerPersistence { data.addMetadata("VOLUME", "shire"); data.addMetadata("owner)", "bilbo"); KeyValueContainer container = new KeyValueContainer(data, conf); - container.create(volumeSet, volumeChoosingPolicy, scmId); + container.create(volumeSet, volumeChoosingPolicy, SCM_ID); containerSet.addContainer(container); - return container; + return container; } @Test @@ -209,7 +173,7 @@ public class TestContainerPersistence { .containsKey(testContainerID)); KeyValueContainerData kvData = (KeyValueContainerData) containerSet.getContainer(testContainerID) - .getContainerData(); + .getContainerData(); Assert.assertNotNull(kvData); Assert.assertTrue(new File(kvData.getMetadataPath()).exists()); @@ -287,7 +251,7 @@ public class TestContainerPersistence { } @Test - public void testGetContainerReports() throws Exception{ + public void testGetContainerReports() throws Exception { final int count = 10; List containerIDs = new ArrayList<>(); @@ -296,7 +260,7 @@ public class TestContainerPersistence { Container container = addContainer(containerSet, testContainerID); // Close a bunch of containers. - if (i%3 == 0) { + if (i % 3 == 0) { container.close(); } containerIDs.add(testContainerID); @@ -307,7 +271,8 @@ public class TestContainerPersistence { List reports = containerSet.getContainerReport().getReportsList(); Assert.assertEquals(10, reports.size()); - for(StorageContainerDatanodeProtocolProtos.ContainerInfo report : reports) { + for (StorageContainerDatanodeProtocolProtos.ContainerInfo report : + reports) { long actualContainerID = report.getContainerID(); Assert.assertTrue(containerIDs.remove(actualContainerID)); } @@ -315,8 +280,8 @@ public class TestContainerPersistence { } /** - * This test creates 50 containers and reads them back 5 containers at a - * time and verifies that we did get back all containers. + * This test creates 50 containers and reads them back 5 containers at a time + * and verifies that we did get back all containers. * * @throws IOException */ @@ -426,7 +391,7 @@ public class TestContainerPersistence { sha.update(FileUtils.readFileToByteArray(fname.toFile())); String val = Hex.encodeHexString(sha.digest()); Assert.assertEquals(fileHashMap.get(fname.getFileName().toString()) - .getChecksum(), val); + .getChecksum(), val); count++; sha.reset(); } @@ -454,8 +419,8 @@ public class TestContainerPersistence { @Test public void testPartialRead() throws Exception { final int datalen = 1024; - final int start = datalen/4; - final int length = datalen/2; + final int start = datalen / 4; + final int length = datalen / 2; long testContainerID = getTestContainerID(); Container container = addContainer(containerSet, testContainerID); @@ -544,7 +509,8 @@ public class TestContainerPersistence { } // Request to read the whole data in a single go. 
- ChunkInfo largeChunk = getChunk(blockID.getLocalID(), 0, 0, datalen * chunkCount); + ChunkInfo largeChunk = getChunk(blockID.getLocalID(), 0, 0, + datalen * chunkCount); byte[] newdata = chunkManager.readChunk(container, blockID, largeChunk); MessageDigest newSha = MessageDigest.getInstance(OzoneConsts.FILE_HASH); newSha.update(newdata); @@ -701,8 +667,8 @@ public class TestContainerPersistence { } /** - * Tries to update an existing and non-existing container. - * Verifies container map and persistent data both updated. + * Tries to update an existing and non-existing container. Verifies container + * map and persistent data both updated. * * @throws IOException */ @@ -743,7 +709,7 @@ public class TestContainerPersistence { orgContainerFile.getAbsolutePath(), newContainerFile.getAbsolutePath()); - ContainerData actualContainerData = ContainerDataYaml.readContainerFile( + ContainerData actualContainerData = ContainerDataYaml.readContainerFile( newContainerFile); Assert.assertEquals("shire_new", actualContainerData.getMetadata().get("VOLUME")); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index 08bc556330..3c77687c73 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.OzoneTestUtils; import org.apache.hadoop.ozone.client.ObjectStore; @@ -88,16 +87,11 @@ public class TestBlockDeletion { File baseDir = new File(path); baseDir.mkdirs(); - path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, - OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT); - - conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path); - conf.setQuietMode(false); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); - + conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(1) .setHbInterval(200) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java index f40e425523..ed9c54dad3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java @@ -1,19 +1,18 @@ /** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at *

* http://www.apache.org/licenses/LICENSE-2.0 *

* Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. */ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; @@ -26,8 +25,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; @@ -54,20 +51,16 @@ public class TestCloseContainerByPipeline { private static OzoneClient client; private static ObjectStore objectStore; - /** * Create a MiniDFSCluster for testing. *

- * Ozone is made active by setting OZONE_ENABLED = true and - * OZONE_HANDLER_TYPE_KEY = "distributed" + * Ozone is made active by setting OZONE_ENABLED = true * * @throws IOException */ @BeforeClass public static void init() throws Exception { conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, - OzoneConsts.OZONE_HANDLER_DISTRIBUTED); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3).build(); cluster.waitForClusterToBeReady(); @@ -243,7 +236,8 @@ public class TestCloseContainerByPipeline { () -> isContainerClosed(cluster, containerID, datanodeDetails), 500, 15 * 1000); //double check if it's really closed (waitFor also throws an exception) - Assert.assertTrue(isContainerClosed(cluster, containerID, datanodeDetails)); + Assert.assertTrue(isContainerClosed(cluster, + containerID, datanodeDetails)); } Assert.assertFalse(logCapturer.getOutput().contains( "submitting CloseContainer request over STAND_ALONE " @@ -257,13 +251,14 @@ public class TestCloseContainerByPipeline { private Boolean isContainerClosed(MiniOzoneCluster cluster, long containerID, DatanodeDetails datanode) { ContainerData containerData; - for (HddsDatanodeService datanodeService : cluster.getHddsDatanodes()) + for (HddsDatanodeService datanodeService : cluster.getHddsDatanodes()) { if (datanode.equals(datanodeService.getDatanodeDetails())) { containerData = datanodeService.getDatanodeStateMachine().getContainer() .getContainerSet().getContainer(containerID).getContainerData(); return containerData.isClosed(); } + } return false; } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 45fd4b7174..f112d26841 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -91,8 +91,6 @@ public class TestOzoneContainer { static OzoneConfiguration newOzoneConfiguration() { final OzoneConfiguration conf = new OzoneConfiguration(); - ContainerTestHelper.setOzoneLocalStorageRoot( - TestOzoneContainer.class, conf); return conf; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java index c686b0b22f..cf469ec4eb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java @@ -54,8 +54,6 @@ public class TestOzoneContainerRatis { static OzoneConfiguration newOzoneConfiguration() { final OzoneConfiguration conf = new OzoneConfiguration(); - ContainerTestHelper.setOzoneLocalStorageRoot( - TestOzoneContainerRatis.class, conf); return conf; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java index 12c2b7b18c..a0c8cfe6e9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java @@ -47,8 +47,6 @@ public class TestRatisManager { static OzoneConfiguration newOzoneConfiguration() { final OzoneConfiguration conf = new OzoneConfiguration(); - ContainerTestHelper.setOzoneLocalStorageRoot( - TestRatisManager.class, conf); return conf; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java index 8b8072c44d..0a3c3918d4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java @@ -20,8 +20,6 @@ package org.apache.hadoop.ozone.freon; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.util.ToolRunner; import org.junit.AfterClass; import org.junit.Assert; @@ -45,16 +43,13 @@ public class TestDataValidate { /** * Create a MiniDFSCluster for testing. *

- * Ozone is made active by setting OZONE_ENABLED = true and - * OZONE_HANDLER_TYPE_KEY = "distributed" + * Ozone is made active by setting OZONE_ENABLED = true * * @throws IOException */ @BeforeClass public static void init() throws Exception { conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, - OzoneConsts.OZONE_HANDLER_DISTRIBUTED); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5).build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreon.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreon.java index 203cf4d72f..022d6b590c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreon.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreon.java @@ -41,8 +41,7 @@ public class TestFreon { /** * Create a MiniDFSCluster for testing. *

- * Ozone is made active by setting OZONE_ENABLED = true and - * OZONE_HANDLER_TYPE_KEY = "distributed" + * Ozone is made active by setting OZONE_ENABLED = true * * @throws IOException */ diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java index c66b3de0f5..c24cfbf6c0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java @@ -24,8 +24,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.*; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.container.common.impl.ContainerData; @@ -33,7 +31,6 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Rule; @@ -60,16 +57,13 @@ public class TestContainerReportWithKeys { /** * Create a MiniDFSCluster for testing. *

- * Ozone is made active by setting OZONE_ENABLED = true and - * OZONE_HANDLER_TYPE_KEY = "distributed" + * Ozone is made active by setting OZONE_ENABLED = true * * @throws IOException */ @BeforeClass public static void init() throws Exception { conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, - OzoneConsts.OZONE_HANDLER_DISTRIBUTED); cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); scm = cluster.getStorageContainerManager(); @@ -117,10 +111,6 @@ public class TestContainerReportWithKeys { cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions() .get(0).getBlocksLatestVersionOnly().get(0); - ContainerData cd = getContainerData(keyInfo.getContainerID()); - -/* LOG.info("DN Container Data: keyCount: {} used: {} ", - cd.getKeyCount(), cd.getBytesUsed());*/ ContainerInfo cinfo = scm.getContainerInfo(keyInfo.getContainerID()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java index 1389cbaa98..f1ff2cfcd0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java @@ -62,8 +62,7 @@ public class TestMultipleContainerReadWrite { /** * Create a MiniDFSCluster for testing. *

- * Ozone is made active by setting OZONE_ENABLED = true and - * OZONE_HANDLER_TYPE_KEY = "distributed" + * Ozone is made active by setting OZONE_ENABLED = true * * @throws IOException */ @@ -73,8 +72,6 @@ public class TestMultipleContainerReadWrite { // set to as small as 100 bytes per block. conf.setLong(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_IN_MB, 1); conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE, 5); - conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, - OzoneConsts.OZONE_HANDLER_DISTRIBUTED); cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); storageHandler = new ObjectStoreHandler(conf).getStorageHandler(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java index 0eb1677ca9..9f97a2cbfc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java @@ -22,8 +22,6 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -67,16 +65,13 @@ public class TestOmBlockVersioning { /** * Create a MiniDFSCluster for testing. *

- * Ozone is made active by setting OZONE_ENABLED = true and - * OZONE_HANDLER_TYPE_KEY = "distributed" + * Ozone is made active by setting OZONE_ENABLED = true * * @throws IOException */ @BeforeClass public static void init() throws Exception { conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, - OzoneConsts.OZONE_HANDLER_DISTRIBUTED); cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); storageHandler = new ObjectStoreHandler(conf).getStorageHandler(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java index 8d0f4b2129..8063981de7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java @@ -23,9 +23,7 @@ import java.io.IOException; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConsts; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -52,8 +50,6 @@ public class TestOmMetrics { @Before public void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, - OzoneConsts.OZONE_HANDLER_DISTRIBUTED); cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); ozoneManager = cluster.getOzoneManager(); @@ -104,7 +100,8 @@ public class TestOmMetrics { Mockito.doThrow(exception).when(mockVm).setOwner(null, null); Mockito.doThrow(exception).when(mockVm).listVolumes(null, null, null, 0); - org.apache.hadoop.test.Whitebox.setInternalState(ozoneManager, "volumeManager", mockVm); + org.apache.hadoop.test.Whitebox.setInternalState(ozoneManager, + "volumeManager", mockVm); doVolumeOps(); omMetrics = getMetrics("OMMetrics"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java index 005a0124e8..ab26c00141 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java @@ -20,7 +20,6 @@ import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.scm.cli.SQLCLI; import org.apache.hadoop.ozone.web.handlers.BucketArgs; import org.apache.hadoop.ozone.web.handlers.KeyArgs; @@ -97,16 +96,13 @@ public class TestOmSQLCli { /** * Create a MiniDFSCluster for testing. *

- * Ozone is made active by setting OZONE_ENABLED = true and - * OZONE_HANDLER_TYPE_KEY = "distributed" + * Ozone is made active by setting OZONE_ENABLED = true * * @throws IOException */ @Before public void setup() throws Exception { conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, - OzoneConsts.OZONE_HANDLER_DISTRIBUTED); cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); storageHandler = new ObjectStoreHandler(conf).getStorageHandler(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java index 5109453d9b..4908c4daf6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java @@ -108,8 +108,7 @@ public class TestOzoneManager { /** * Create a MiniDFSCluster for testing. *

- * Ozone is made active by setting OZONE_ENABLED = true and - * OZONE_HANDLER_TYPE_KEY = "distributed" + * Ozone is made active by setting OZONE_ENABLED = true * * @throws IOException */ @@ -119,8 +118,6 @@ public class TestOzoneManager { clusterId = UUID.randomUUID().toString(); scmId = UUID.randomUUID().toString(); omId = UUID.randomUUID().toString(); - conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, - OzoneConsts.OZONE_HANDLER_DISTRIBUTED); conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2); cluster = MiniOzoneCluster.newBuilder(conf) .setClusterId(clusterId) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java index 573f097e34..528828be8a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java @@ -48,7 +48,6 @@ import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneAcl.OzoneACLRights; import org.apache.hadoop.ozone.OzoneAcl.OzoneACLType; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -134,11 +133,6 @@ public class TestOzoneShell { baseDir = new File(path); baseDir.mkdirs(); - path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, - OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT); - - conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path); - conf.setQuietMode(false); shell = new Shell(); shell.setConf(conf); @@ -146,6 +140,7 @@ public class TestOzoneShell { .setNumDatanodes(3) .build(); conf.setInt(OZONE_REPLICATION, ReplicationFactor.THREE.getValue()); + conf.setQuietMode(false); client = new RpcClient(conf); cluster.waitForClusterToBeReady(); } @@ -208,12 +203,15 @@ public class TestOzoneShell { testCreateVolume(volumeName, ""); volumeName = "volume" + RandomStringUtils.randomNumeric(5); testCreateVolume("/////" + volumeName, ""); - testCreateVolume("/////", "Volume name is required to create a volume"); + testCreateVolume("/////", "Volume name is required " + + "to create a volume"); testCreateVolume("/////vol/123", - "Illegal argument: Bucket or Volume name has an unsupported character : /"); + "Illegal argument: Bucket or Volume name has " + + "an unsupported character : /"); } - private void testCreateVolume(String volumeName, String errorMsg) throws Exception { + private void testCreateVolume(String volumeName, String errorMsg) + throws Exception { err.reset(); String userName = "bilbo"; String[] args = new String[] {"-createVolume", url + "/" + volumeName, @@ -397,7 +395,7 @@ public class TestOzoneShell { // test -prefix option out.reset(); - args = new String[] { "-listVolume", url + "/", "-user", user1, "-length", + args = new String[] {"-listVolume", url + "/", "-user", user1, "-length", "100", "-prefix", "test-vol-" + protocol + "1" }; assertEquals(0, ToolRunner.run(shell, args)); commandOutput = out.toString(); @@ -414,7 +412,7 @@ public class TestOzoneShell { // test -start option out.reset(); - args = new String[] { "-listVolume", url + "/", "-user", user2, "-length", + args = new String[] {"-listVolume", url + "/", "-user", user2, "-length", "100", "-start", "test-vol-" + protocol + "15" }; assertEquals(0, ToolRunner.run(shell, 
args)); commandOutput = out.toString(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestLocalOzoneVolumes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestLocalOzoneVolumes.java deleted file mode 100644 index 441f771e84..0000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestLocalOzoneVolumes.java +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.TestOzoneHelper; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; - -import java.io.IOException; - -/** - * Test ozone volume in the local storage handler scenario. - */ -public class TestLocalOzoneVolumes extends TestOzoneHelper { - /** - * Set the timeout for every test. - */ - @Rule - public Timeout testTimeout = new Timeout(300000); - - private static MiniOzoneCluster cluster = null; - private static int port = 0; - - /** - * Create a MiniDFSCluster for testing. - *

- * Ozone is made active by setting OZONE_ENABLED = true and - * OZONE_HANDLER_TYPE_KEY = "local" , which uses a local directory to - * emulate Ozone backend. - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - - String path = GenericTestUtils - .getTempPath(TestLocalOzoneVolumes.class.getSimpleName()); - path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, - OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT); - - conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path); - Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG); - - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - port = cluster.getHddsDatanodes().get(0) - .getDatanodeDetails().getPort( - DatanodeDetails.Port.Name.REST).getValue(); - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - /** - * Creates Volumes on Ozone Store. - * - * @throws IOException - */ - @Test - public void testCreateVolumes() throws IOException { - super.testCreateVolumes(port); - } - - /** - * Create Volumes with Quota. - * - * @throws IOException - */ - @Test - public void testCreateVolumesWithQuota() throws IOException { - super.testCreateVolumesWithQuota(port); - } - - /** - * Create Volumes with Invalid Quota. - * - * @throws IOException - */ - @Test - public void testCreateVolumesWithInvalidQuota() throws IOException { - super.testCreateVolumesWithInvalidQuota(port); - } - - /** - * To create a volume a user name must be specified using OZONE_USER header. - * This test verifies that we get an error in case we call without a OZONE - * user name. - * - * @throws IOException - */ - @Test - public void testCreateVolumesWithInvalidUser() throws IOException { - super.testCreateVolumesWithInvalidUser(port); - } - - /** - * Only Admins can create volumes in Ozone. This test uses simple userauth as - * backend and hdfs and root are admin users in the simple backend. - *

- * This test tries to create a volume as user bilbo. - * - * @throws IOException - */ - @Test - public void testCreateVolumesWithOutAdminRights() throws IOException { - super.testCreateVolumesWithOutAdminRights(port); - } - - /** - * Create a bunch of volumes in a loop. - * - * @throws IOException - */ - //@Test - public void testCreateVolumesInLoop() throws IOException { - super.testCreateVolumesInLoop(port); - } - /** - * Get volumes owned by the user. - * - * @throws IOException - */ - @Test - public void testGetVolumesByUser() throws IOException { - super.testGetVolumesByUser(port); - } - - /** - * Admins can read volumes belonging to other users. - * - * @throws IOException - */ - @Test - public void testGetVolumesOfAnotherUser() throws IOException { - super.testGetVolumesOfAnotherUser(port); - } - - /** - * if you try to read volumes belonging to another user, - * then server always ignores it. - * - * @throws IOException - */ - @Test @Ignore - public void testGetVolumesOfAnotherUserShouldFail() throws IOException { - super.testGetVolumesOfAnotherUserShouldFail(port); - } - - @Test - public void testListKeyOnEmptyBucket() throws IOException { - super.testListKeyOnEmptyBucket(port); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java similarity index 92% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java index e592d560d3..290e834057 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java @@ -19,9 +19,7 @@ package org.apache.hadoop.ozone.web; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.TestOzoneHelper; import org.apache.log4j.Level; import org.apache.log4j.Logger; @@ -39,9 +37,9 @@ import java.io.IOException; /** * Test ozone volume in the distributed storage handler scenario. */ -public class TestDistributedOzoneVolumes extends TestOzoneHelper { +public class TestOzoneVolumes extends TestOzoneHelper { private static final org.slf4j.Logger LOG = - LoggerFactory.getLogger(TestDistributedOzoneVolumes.class); + LoggerFactory.getLogger(TestOzoneVolumes.class); /** * Set the timeout for every test. */ @@ -54,8 +52,7 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper { /** * Create a MiniDFSCluster for testing. *

-   * Ozone is made active by setting OZONE_ENABLED = true and
-   * OZONE_HANDLER_TYPE_KEY = "distributed"
+   * Ozone is made active by setting OZONE_ENABLED = true
    *
    * @throws IOException
    */
@@ -63,8 +60,6 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
   public static void init() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
-    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
-        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
     cluster = MiniOzoneCluster.newBuilder(conf).build();
     cluster.waitForClusterToBeReady();
     port = cluster.getHddsDatanodes().get(0)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java
index c014a60e63..9828b95258 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.web;
 
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.rest.headers.Header;
@@ -62,20 +61,13 @@ public class TestOzoneWebAccess {
   /**
    * Create a MiniDFSCluster for testing.
    *
-   * Ozone is made active by setting OZONE_ENABLED = true and
-   * OZONE_HANDLER_TYPE_KEY = "local" , which uses a local directory to
-   * emulate Ozone backend.
-   *
+   * Ozone is made active by setting OZONE_ENABLED = true
    * @throws IOException
    */
   @BeforeClass
   public static void init() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
 
-    String path = GenericTestUtils
-        .getTempPath(TestOzoneWebAccess.class.getSimpleName());
-    conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
-
     cluster = MiniOzoneCluster.newBuilder(conf).build();
     cluster.waitForClusterToBeReady();
     port = cluster.getHddsDatanodes().get(0)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java
index 39aa03c143..2eedb58abd 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.web.client;
 
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.client.BucketArgs;
 import org.apache.hadoop.ozone.client.VolumeArgs;
@@ -86,9 +85,7 @@ public class TestBuckets {
   /**
    * Create a MiniDFSCluster for testing.
    *
-   * Ozone is made active by setting OZONE_ENABLED = true and
-   * OZONE_HANDLER_TYPE_KEY = "local" , which uses a local directory to
-   * emulate Ozone backend.
+   * Ozone is made active by setting OZONE_ENABLED = true
    *
    * @throws IOException
    */
@@ -100,10 +97,6 @@ public class TestBuckets {
     String path = GenericTestUtils
         .getTempPath(TestBuckets.class.getSimpleName());
-    path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
-        OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
-
-    conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(3)
         .build();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java
index 6a9202272b..5eeeacf15b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java
@@ -19,9 +19,9 @@ package org.apache.hadoop.ozone.web.client;
 
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.RatisTestHelper;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Rule;
@@ -58,7 +58,7 @@ public class TestKeysRatis {
   @BeforeClass
   public static void init() throws Exception {
     suite = new RatisTestHelper.RatisTestSuite(TestBucketsRatis.class);
-    path = suite.getConf().get(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT);
+    path = GenericTestUtils.getTempPath(TestKeysRatis.class.getSimpleName());
     ozoneCluster = suite.getCluster();
     ozoneCluster.waitForClusterToBeReady();
     client = suite.newOzoneClient();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestOzoneClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestOzoneClient.java
index 86de8dff65..f8f57d70f8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestOzoneClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestOzoneClient.java
@@ -45,7 +45,6 @@ import io.netty.handler.logging.LoggingHandler;
 
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.rest.headers.Header;
@@ -92,8 +91,6 @@ public class TestOzoneClient {
   public static void init() throws Exception {
     Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.ALL);
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
-        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
     cluster = MiniOzoneCluster.newBuilder(conf).build();
     cluster.waitForClusterToBeReady();
     int port = cluster.getHddsDatanodes().get(0)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
index c9a0c38fa9..31f9214ba6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
@@ -23,7 +23,6 @@ import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.hdds.client.OzoneQuota;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.client.VolumeArgs;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.client.rest.RestClient;
@@ -79,9 +78,7 @@ public class TestVolume {
   /**
    * Create a MiniDFSCluster for testing.
    *
-   * Ozone is made active by setting OZONE_ENABLED = true and
-   * OZONE_HANDLER_TYPE_KEY = "local" , which uses a local directory to
-   * emulate Ozone backend.
+   * Ozone is made active by setting OZONE_ENABLED = true
    *
    * @throws IOException
    */
@@ -91,11 +88,8 @@ public class TestVolume {
     String path = GenericTestUtils
         .getTempPath(TestVolume.class.getSimpleName());
-    path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
-        OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
     FileUtils.deleteDirectory(new File(path));
-    conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
     Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
 
     cluster = MiniOzoneCluster.newBuilder(conf).build();
@@ -221,7 +215,8 @@ public class TestVolume {
     client.createVolume(volumeName);
     client.setVolumeQuota(volumeName, OzoneQuota.parseQuota("1000MB"));
     OzoneVolume newVol = client.getVolumeDetails(volumeName);
-    assertEquals(newVol.getQuota(), OzoneQuota.parseQuota("1000MB").sizeInBytes());
+    assertEquals(newVol.getQuota(),
+        OzoneQuota.parseQuota("1000MB").sizeInBytes());
     // verify if the creation time is missing after setting quota operation
     assertTrue(newVol.getCreationTime() > 0);
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java
index d6783ad8e5..1a05a3cab0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java
@@ -72,11 +72,8 @@ public class TestVolumeRatis {
     String path = GenericTestUtils
         .getTempPath(TestVolume.class.getSimpleName());
-    path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
-        OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
     FileUtils.deleteDirectory(new File(path));
-    conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
     Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
 
     cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
index 2200cd8887..f56cbe8223 100644
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
+++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
@@ -1,28 +1,44 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
 * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
 */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients;
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
-import static org.apache.hadoop.ozone.OmUtils.getOmAddress;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.*;
-import static com.sun.jersey.api.core.ResourceConfig.PROPERTY_CONTAINER_REQUEST_FILTERS;
-import static com.sun.jersey.api.core.ResourceConfig.FEATURE_TRACE;
+import com.sun.jersey.api.container.ContainerFactory;
+import com.sun.jersey.api.core.ApplicationAdapter;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
+import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
+import org.apache.hadoop.ozone.web.ObjectStoreApplication;
+import org.apache.hadoop.ozone.web.handlers.ServiceFilter;
+import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
+import org.apache.hadoop.ozone.web.netty.ObjectStoreJerseyContainer;
+import org.apache.hadoop.ozone.web.storage.DistributedStorageHandler;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -30,35 +46,13 @@ import java.net.InetSocketAddress;
 import java.util.HashMap;
 import java.util.Map;
 
-import com.sun.jersey.api.container.ContainerFactory;
-import com.sun.jersey.api.core.ApplicationAdapter;
-
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.web.ObjectStoreApplication;
-import org.apache.hadoop.ozone.web.handlers.ServiceFilter;
-import org.apache.hadoop.ozone.web.netty.ObjectStoreJerseyContainer;
-import org.apache.hadoop.hdds.scm.protocolPB
-    .ScmBlockLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.protocolPB
-    .StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.storage.DistributedStorageHandler;
-import org.apache.hadoop.ozone.web.localstorage.LocalStorageHandler;
-import org.apache.hadoop.security.UserGroupInformation;
+import static com.sun.jersey.api.core.ResourceConfig.FEATURE_TRACE;
+import static com.sun.jersey.api.core.ResourceConfig.PROPERTY_CONTAINER_REQUEST_FILTERS;
+import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients;
+import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
+import static org.apache.hadoop.ozone.OmUtils.getOmAddress;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TRACE_ENABLED_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TRACE_ENABLED_KEY;
 
 /**
  * Implements object store handling within the DataNode process. This class is
@@ -86,69 +80,49 @@ public final class ObjectStoreHandler implements Closeable {
    * @throws IOException if there is an I/O error
    */
   public ObjectStoreHandler(Configuration conf) throws IOException {
-    String shType = conf.getTrimmed(OZONE_HANDLER_TYPE_KEY,
-        OZONE_HANDLER_TYPE_DEFAULT);
-    LOG.info("ObjectStoreHandler initializing with {}: {}",
-        OZONE_HANDLER_TYPE_KEY, shType);
     boolean ozoneTrace = conf.getBoolean(OZONE_TRACE_ENABLED_KEY,
         OZONE_TRACE_ENABLED_DEFAULT);
 
     // Initialize Jersey container for object store web application.
-    if (OzoneConsts.OZONE_HANDLER_DISTRIBUTED.equalsIgnoreCase(shType)) {
-      RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class,
-          ProtobufRpcEngine.class);
-      long scmVersion =
-          RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class);
+    RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class,
+        ProtobufRpcEngine.class);
+    long scmVersion =
+        RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class);
 
-      InetSocketAddress scmAddress =
-          getScmAddressForClients(conf);
-      this.storageContainerLocationClient =
-          new StorageContainerLocationProtocolClientSideTranslatorPB(
-              RPC.getProxy(StorageContainerLocationProtocolPB.class, scmVersion,
-                  scmAddress, UserGroupInformation.getCurrentUser(), conf,
-                  NetUtils.getDefaultSocketFactory(conf),
-                  Client.getRpcTimeout(conf)));
+    InetSocketAddress scmAddress =
+        getScmAddressForClients(conf);
+    this.storageContainerLocationClient =
+        new StorageContainerLocationProtocolClientSideTranslatorPB(
+            RPC.getProxy(StorageContainerLocationProtocolPB.class, scmVersion,
+                scmAddress, UserGroupInformation.getCurrentUser(), conf,
+                NetUtils.getDefaultSocketFactory(conf),
+                Client.getRpcTimeout(conf)));
 
-      InetSocketAddress scmBlockAddress =
-          getScmAddressForBlockClients(conf);
-      this.scmBlockLocationClient =
-          new ScmBlockLocationProtocolClientSideTranslatorPB(
-              RPC.getProxy(ScmBlockLocationProtocolPB.class, scmVersion,
-                  scmBlockAddress, UserGroupInformation.getCurrentUser(), conf,
-                  NetUtils.getDefaultSocketFactory(conf),
-                  Client.getRpcTimeout(conf)));
+    InetSocketAddress scmBlockAddress =
+        getScmAddressForBlockClients(conf);
+    this.scmBlockLocationClient =
+        new ScmBlockLocationProtocolClientSideTranslatorPB(
+            RPC.getProxy(ScmBlockLocationProtocolPB.class, scmVersion,
+                scmBlockAddress, UserGroupInformation.getCurrentUser(), conf,
+                NetUtils.getDefaultSocketFactory(conf),
+                Client.getRpcTimeout(conf)));
 
-      RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class,
-          ProtobufRpcEngine.class);
-      long omVersion =
-          RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
-      InetSocketAddress omAddress = getOmAddress(conf);
-      this.ozoneManagerClient =
-          new OzoneManagerProtocolClientSideTranslatorPB(
-              RPC.getProxy(OzoneManagerProtocolPB.class, omVersion,
-                  omAddress, UserGroupInformation.getCurrentUser(), conf,
-                  NetUtils.getDefaultSocketFactory(conf),
-                  Client.getRpcTimeout(conf)));
+    RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class,
+        ProtobufRpcEngine.class);
+    long omVersion =
+        RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
+    InetSocketAddress omAddress = getOmAddress(conf);
+    this.ozoneManagerClient =
+        new OzoneManagerProtocolClientSideTranslatorPB(
+            RPC.getProxy(OzoneManagerProtocolPB.class, omVersion,
+                omAddress, UserGroupInformation.getCurrentUser(), conf,
+                NetUtils.getDefaultSocketFactory(conf),
+                Client.getRpcTimeout(conf)));
 
-      storageHandler = new DistributedStorageHandler(
-          new OzoneConfiguration(conf),
-          this.storageContainerLocationClient,
-          this.ozoneManagerClient);
-    } else {
-      if (OzoneConsts.OZONE_HANDLER_LOCAL.equalsIgnoreCase(shType)) {
-        storageHandler = new LocalStorageHandler(conf);
-        this.storageContainerLocationClient = null;
-        this.scmBlockLocationClient = null;
-        this.ozoneManagerClient = null;
-      } else {
-        throw new IllegalArgumentException(
-            String.format("Unrecognized value for %s: %s,"
-                + " Allowed values are %s,%s",
-                OZONE_HANDLER_TYPE_KEY, shType,
-                OzoneConsts.OZONE_HANDLER_DISTRIBUTED,
-                OzoneConsts.OZONE_HANDLER_LOCAL));
-      }
-    }
+    storageHandler = new DistributedStorageHandler(
+        new OzoneConfiguration(conf),
+        this.storageContainerLocationClient,
+        this.ozoneManagerClient);
     ApplicationAdapter aa =
         new ApplicationAdapter(new ObjectStoreApplication());
     Map<String, Object> settingsMap = new HashMap<>();
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/StorageHandlerBuilder.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/StorageHandlerBuilder.java
index f86f247ac7..b3c3391433 100644
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/StorageHandlerBuilder.java
+++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/StorageHandlerBuilder.java
@@ -19,10 +19,11 @@ package org.apache.hadoop.ozone.web.handlers;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.localstorage.LocalStorageHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
 
 /**
  * This class is responsible for providing a {@link StorageHandler}
@@ -31,6 +32,9 @@ import org.apache.hadoop.ozone.web.localstorage.LocalStorageHandler;
 @InterfaceAudience.Private
 public final class StorageHandlerBuilder {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(StorageHandlerBuilder.class);
   private static final ThreadLocal<StorageHandler>
       STORAGE_HANDLER_THREAD_LOCAL = new ThreadLocal<>();
@@ -40,15 +44,15 @@ public final class StorageHandlerBuilder {
    *
    * @return StorageHandler from thread-local storage
    */
-  public static StorageHandler getStorageHandler() {
+  public static StorageHandler getStorageHandler() throws IOException {
     StorageHandler storageHandler = STORAGE_HANDLER_THREAD_LOCAL.get();
     if (storageHandler != null) {
       return storageHandler;
     } else {
-      // This only happens while using mvn jetty:run for testing.
-      Configuration conf = new OzoneConfiguration();
-      return new LocalStorageHandler(conf);
+      LOG.error("No Storage Handler Configured.");
+      throw new IOException("Invalid Handler Configuration");
     }
+
   }
 
   /**
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/localstorage/LocalStorageHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/localstorage/LocalStorageHandler.java
deleted file mode 100644
index 89158cb521..0000000000
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/localstorage/LocalStorageHandler.java
+++ /dev/null
@@ -1,385 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.localstorage;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.client.io.LengthInputStream;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.web.handlers.BucketArgs;
-import org.apache.hadoop.ozone.web.handlers.KeyArgs;
-import org.apache.hadoop.ozone.web.handlers.ListArgs;
-import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.web.request.OzoneQuota;
-import org.apache.hadoop.ozone.web.response.BucketInfo;
-import org.apache.hadoop.ozone.web.response.KeyInfo;
-import org.apache.hadoop.ozone.web.response.ListBuckets;
-import org.apache.hadoop.ozone.web.response.ListKeys;
-import org.apache.hadoop.ozone.web.response.ListVolumes;
-import org.apache.hadoop.ozone.web.response.VolumeInfo;
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-/**
- * PLEASE NOTE : This file is a dummy backend for test purposes and prototyping
- * effort only. It does not handle any Object semantics correctly, neither does
- * it take care of security.
- */
-@InterfaceAudience.Private
-public class LocalStorageHandler implements StorageHandler {
-  private final Configuration conf;
-
-  /**
-   * Constructs LocalStorageHandler.
-   *
-   * @param conf ozone conf.
-   */
-  public LocalStorageHandler(Configuration conf) {
-    this.conf = conf;
-  }
-
-  /**
-   * Creates Storage Volume.
- * - * @param args - volumeArgs - * @throws IOException - */ - @Override - public void createVolume(VolumeArgs args) throws IOException, OzoneException { - OzoneMetadataManager oz = - OzoneMetadataManager.getOzoneMetadataManager(conf); - oz.createVolume(args); - - } - - /** - * setVolumeOwner - sets the owner of the volume. - * - * @param args volumeArgs - * @throws IOException - */ - @Override - public void setVolumeOwner(VolumeArgs args) - throws IOException, OzoneException { - OzoneMetadataManager oz = - OzoneMetadataManager.getOzoneMetadataManager(conf); - oz.setVolumeProperty(args, OzoneMetadataManager.VolumeProperty.OWNER); - } - - /** - * Set Volume Quota Info. - * - * @param args - volumeArgs - * @param remove - true if the request is to remove the quota - * @throws IOException - */ - @Override - public void setVolumeQuota(VolumeArgs args, boolean remove) - throws IOException, OzoneException { - OzoneMetadataManager oz = - OzoneMetadataManager.getOzoneMetadataManager(conf); - - if (remove) { - OzoneQuota quota = new OzoneQuota(); - args.setQuota(quota); - } - oz.setVolumeProperty(args, OzoneMetadataManager.VolumeProperty.QUOTA); - } - - /** - * Checks if a Volume exists and the user specified has access to the volume. - * - * @param volume - Volume Name - * @param acl - Ozone acl which needs to be compared for access - * @return - Boolean - True if the user can modify the volume. This is - * possible for owners of the volume and admin users - * @throws IOException - */ - @Override - public boolean checkVolumeAccess(String volume, OzoneAcl acl) - throws IOException, OzoneException { - OzoneMetadataManager oz = - OzoneMetadataManager.getOzoneMetadataManager(conf); - return oz.checkVolumeAccess(volume, acl); - } - - /** - * Returns Info about the specified Volume. - * - * @param args - volumeArgs - * @return VolumeInfo - * @throws IOException - */ - @Override - public VolumeInfo getVolumeInfo(VolumeArgs args) - throws IOException, OzoneException { - OzoneMetadataManager oz = - OzoneMetadataManager.getOzoneMetadataManager(conf); - return oz.getVolumeInfo(args); - } - - /** - * Deletes an Empty Volume. - * - * @param args - Volume Args - * @throws IOException - */ - @Override - public void deleteVolume(VolumeArgs args) throws IOException, OzoneException { - OzoneMetadataManager oz = - OzoneMetadataManager.getOzoneMetadataManager(conf); - oz.deleteVolume(args); - - } - - /** - * Returns the List of Volumes owned by the specific user. - * - * @param args - ListArgs - * @return - List of Volumes - * @throws IOException - */ - @Override - public ListVolumes listVolumes(ListArgs args) - throws IOException, OzoneException { - OzoneMetadataManager oz = - OzoneMetadataManager.getOzoneMetadataManager(conf); - return oz.listVolumes(args); - } - - /** - * true if the bucket exists and user has read access to the bucket else - * throws Exception. - * - * @param args Bucket args structure - * @throws IOException - */ - @Override - public void checkBucketAccess(BucketArgs args) - throws IOException, OzoneException { - - } - - /** - * Creates a Bucket in specified Volume. - * - * @param args BucketArgs- BucketName, UserName and Acls - * @throws IOException - */ - @Override - public void createBucket(BucketArgs args) throws IOException, OzoneException { - OzoneMetadataManager oz = - OzoneMetadataManager.getOzoneMetadataManager(conf); - oz.createBucket(args); - } - - /** - * Adds or Removes ACLs from a Bucket. 
- * - * @param args - BucketArgs - * @throws IOException - */ - @Override - public void setBucketAcls(BucketArgs args) - throws IOException, OzoneException { - OzoneMetadataManager oz = - OzoneMetadataManager.getOzoneMetadataManager(conf); - oz.setBucketProperty(args, OzoneMetadataManager.BucketProperty.ACLS); - } - - /** - * Enables or disables Bucket Versioning. - * - * @param args - BucketArgs - * @throws IOException - */ - @Override - public void setBucketVersioning(BucketArgs args) - throws IOException, OzoneException { - OzoneMetadataManager oz = - OzoneMetadataManager.getOzoneMetadataManager(conf); - oz.setBucketProperty(args, OzoneMetadataManager.BucketProperty.VERSIONING); - - } - - /** - * Sets the Storage Class of a Bucket. - * - * @param args - BucketArgs - * @throws IOException - */ - @Override - public void setBucketStorageClass(BucketArgs args) - throws IOException, OzoneException { - OzoneMetadataManager oz = - OzoneMetadataManager.getOzoneMetadataManager(conf); - oz.setBucketProperty(args, OzoneMetadataManager.BucketProperty.STORAGETYPE); - - } - - /** - * Deletes a bucket if it is empty. - * - * @param args Bucket args structure - * @throws IOException - */ - @Override - public void deleteBucket(BucketArgs args) throws IOException, OzoneException { - OzoneMetadataManager oz = - OzoneMetadataManager.getOzoneMetadataManager(conf); - oz.deleteBucket(args); - } - - /** - * Returns all Buckets of a specified Volume. - * - * @param args --User Args - * @return ListAllBuckets - * @throws OzoneException - */ - @Override - public ListBuckets listBuckets(ListArgs args) - throws IOException, OzoneException { - OzoneMetadataManager oz = - OzoneMetadataManager.getOzoneMetadataManager(conf); - return oz.listBuckets(args); - } - - /** - * Returns Bucket's Metadata as a String. - * - * @param args Bucket args structure - * @return Info about the bucket - * @throws IOException - */ - @Override - public BucketInfo getBucketInfo(BucketArgs args) - throws IOException, OzoneException { - OzoneMetadataManager oz = - OzoneMetadataManager.getOzoneMetadataManager(conf); - return oz.getBucketInfo(args); - } - - /** - * Writes a key in an existing bucket. - * - * @param args KeyArgs - * @return InputStream - * @throws OzoneException - */ - @Override - public OutputStream newKeyWriter(KeyArgs args) throws IOException, - OzoneException { - OzoneMetadataManager oz = - OzoneMetadataManager.getOzoneMetadataManager(conf); - return oz.createKey(args); - } - - /** - * Tells the file system that the object has been written out completely and - * it can do any house keeping operation that needs to be done. - * - * @param args Key Args - * @param stream - * @throws IOException - */ - @Override - public void commitKey(KeyArgs args, OutputStream stream) throws - IOException, OzoneException { - OzoneMetadataManager oz = - OzoneMetadataManager.getOzoneMetadataManager(conf); - oz.commitKey(args, stream); - - } - - /** - * Reads a key from an existing bucket. - * - * @param args KeyArgs - * @return LengthInputStream - * @throws IOException - */ - @Override - public LengthInputStream newKeyReader(KeyArgs args) throws IOException, - OzoneException { - OzoneMetadataManager oz = - OzoneMetadataManager.getOzoneMetadataManager(conf); - return oz.newKeyReader(args); - } - - /** - * Deletes an existing key. 
- * - * @param args KeyArgs - * @throws OzoneException - */ - @Override - public void deleteKey(KeyArgs args) throws IOException, OzoneException { - OzoneMetadataManager oz = - OzoneMetadataManager.getOzoneMetadataManager(conf); - oz.deleteKey(args); - } - - @Override - public void renameKey(KeyArgs args, String toKeyName) - throws IOException, OzoneException { - throw new UnsupportedOperationException("Not yet implemented"); - } - - /** - * Returns a list of Key. - * - * @param args KeyArgs - * @return BucketList - * @throws IOException - */ - @Override - public ListKeys listKeys(ListArgs args) throws IOException, OzoneException { - OzoneMetadataManager oz = - OzoneMetadataManager.getOzoneMetadataManager(conf); - return oz.listKeys(args); - - } - - /** - * Get information of the specified Key. - * - * @param args Key Args - * - * @return KeyInfo - * - * @throws IOException - * @throws OzoneException - */ - @Override - public KeyInfo getKeyInfo(KeyArgs args) throws IOException, OzoneException { - OzoneMetadataManager oz = OzoneMetadataManager - .getOzoneMetadataManager(conf); - return oz.getKeyInfo(args); - } - - @Override - public void close() { - //No resource to close, do nothing. - } - -} diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/localstorage/OzoneMetadataManager.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/localstorage/OzoneMetadataManager.java deleted file mode 100644 index 1fe9a18241..0000000000 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/localstorage/OzoneMetadataManager.java +++ /dev/null @@ -1,1138 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.web.localstorage; - -import com.google.common.base.Preconditions; -import org.apache.commons.codec.digest.DigestUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.io.LengthInputStream; -import org.apache.hadoop.ozone.web.exceptions.ErrorTable; -import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.web.handlers.BucketArgs; -import org.apache.hadoop.ozone.web.handlers.KeyArgs; -import org.apache.hadoop.ozone.web.handlers.ListArgs; -import org.apache.hadoop.ozone.web.handlers.UserArgs; -import org.apache.hadoop.ozone.web.handlers.VolumeArgs; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.web.response.BucketInfo; -import org.apache.hadoop.ozone.web.response.KeyInfo; -import org.apache.hadoop.ozone.web.response.ListBuckets; -import org.apache.hadoop.ozone.web.response.ListKeys; -import org.apache.hadoop.ozone.web.response.ListVolumes; -import org.apache.hadoop.ozone.web.response.VolumeInfo; -import org.apache.hadoop.ozone.web.response.VolumeOwner; -import org.apache.hadoop.utils.MetadataStore; -import org.apache.hadoop.utils.MetadataStoreBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.OutputStream; -import java.nio.charset.Charset; -import java.text.SimpleDateFormat; -import java.util.Date; -import java.util.List; -import java.util.ListIterator; -import java.util.Locale; -import java.util.TimeZone; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -/** - * A stand alone Ozone implementation that allows us to run Ozone tests in local - * mode. This acts as the ozone backend when using MiniDFSCluster for testing. - */ -public final class OzoneMetadataManager { - - /* - OzoneMetadataManager manages volume/bucket/object metadata and - data. - - Metadata is maintained in 2 level DB files, UserDB and MetadataDB. - - UserDB contains a Name and a List. For example volumes owned by the user - bilbo, would be maintained in UserDB as {bilbo}->{shire, rings} - - This list part of mapping is context sensitive. That is, if you use {user - name} as the key, the list you get is a list of volumes. if you use - {user/volume} as the key the list you get is list of buckets. if you use - {user/volume/bucket} as key the list you get is the list of objects. - - All keys in the UserDB starts with the UserName. - - We also need to maintain a flat namespace for volumes. This is - maintained by the MetadataDB. MetadataDB contains the name of an - object(volume, bucket or key) and its associated metadata. - The keys in the Metadata DB are {volume}, {volume/bucket} or - {volume/bucket/key}. User name is absent, so we have a common root name - space for the volume. - - The value of part of metadataDB points to corresponding *Info structures. - {volume] -> volumeInfo - {volume/bucket} -> bucketInfo - {volume/bucket/key} -> keyInfo - - - Here are various work flows : - - CreateVolume -> Check if Volume exists in metadataDB, if not update UserDB - with a list of volumes and update metadataDB with VolumeInfo. - - DeleteVolume -> Check the Volume, and check the VolumeInfo->bucketCount. 
- if bucketCount == 0, delete volume from userDB->{List of volumes} and - metadataDB. - - Very similar work flows exist for CreateBucket and DeleteBucket. - - // Please note : These database operations are *not* transactional, - // which means that failure can lead to inconsistencies. - // Only way to recover is to reset to a clean state, or - // use rm -rf /tmp/ozone :) - - We have very simple locking policy. We have a ReaderWriter lock that is - taken for each action, this lock is aptly named "lock". - - All actions *must* be performed with a lock held, either a read - lock or a write lock. Violation of these locking policies can be harmful. - - - // // IMPORTANT : - // // This is a simulation layer, this is NOT how the real - // // OZONE functions. This is written to so that we can write - // // stand-alone tests for the protocol and client code. - -*/ - static final Logger LOG = LoggerFactory.getLogger(OzoneMetadataManager.class); - private static final String USER_DB = "/user.db"; - private static final String META_DB = "/metadata.db"; - private static OzoneMetadataManager bm = null; - private MetadataStore userDB; - private MetadataStore metadataDB; - private ReadWriteLock lock; - private Charset encoding = Charset.forName("UTF-8"); - private String storageRoot; - private static final String OBJECT_DIR = "/_objects/"; - - // This table keeps a pointer to objects whose operations - // are in progress but not yet committed to persistent store - private ConcurrentHashMap inProgressObjects; - - /** - * Constructs OzoneMetadataManager. - */ - private OzoneMetadataManager(Configuration conf) throws IOException { - - lock = new ReentrantReadWriteLock(); - storageRoot = - conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, - OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT); - - File file = new File(storageRoot + OBJECT_DIR); - - if (!file.exists() && !file.mkdirs()) { - LOG.error("Creation of Ozone root failed. " + file.toString()); - throw new IOException("Creation of Ozone root failed."); - } - - try { - userDB = MetadataStoreBuilder.newBuilder() - .setDbFile(new File(storageRoot + USER_DB)) - .setCreateIfMissing(true) - .build(); - metadataDB = MetadataStoreBuilder.newBuilder() - .setDbFile(new File(storageRoot + META_DB)) - .setCreateIfMissing(true) - .build(); - inProgressObjects = new ConcurrentHashMap<>(); - } catch (IOException ex) { - LOG.error("Cannot open db :" + ex.getMessage()); - throw ex; - } - } - - /** - * Gets Ozone Manager. - * - * @return OzoneMetadataManager - */ - public static synchronized OzoneMetadataManager - getOzoneMetadataManager(Configuration conf) throws IOException { - if (bm == null) { - bm = new OzoneMetadataManager(conf); - } - return bm; - } - - /** - * Creates a volume. 
- * - * @param args - VolumeArgs - * @throws OzoneException - */ - public void createVolume(VolumeArgs args) throws OzoneException { - lock.writeLock().lock(); - try { - SimpleDateFormat format = - new SimpleDateFormat(OzoneConsts.OZONE_DATE_FORMAT, Locale.US); - format.setTimeZone(TimeZone.getTimeZone(OzoneConsts.OZONE_TIME_ZONE)); - - byte[] volumeName = - metadataDB.get(args.getVolumeName().getBytes(encoding)); - - if (volumeName != null) { - LOG.debug("Volume {} already exists.", volumeName); - throw ErrorTable.newError(ErrorTable.VOLUME_ALREADY_EXISTS, args); - } - - VolumeInfo newVInfo = new VolumeInfo(args.getVolumeName(), format - .format(new Date(System.currentTimeMillis())), args.getAdminName()); - - newVInfo.setQuota(args.getQuota()); - VolumeOwner owner = new VolumeOwner(args.getUserName()); - newVInfo.setOwner(owner); - - ListVolumes volumeList; - byte[] userVolumes = userDB.get(args.getUserName().getBytes(encoding)); - if (userVolumes == null) { - volumeList = new ListVolumes(); - } else { - volumeList = ListVolumes.parse(new String(userVolumes, encoding)); - } - - volumeList.addVolume(newVInfo); - volumeList.sort(); - - // Please note : These database operations are *not* transactional, - // which means that failure can lead to inconsistencies. - // Only way to recover is to reset to a clean state, or - // use rm -rf /tmp/ozone :) - - - userDB.put(args.getUserName().getBytes(encoding), - volumeList.toDBString().getBytes(encoding)); - - metadataDB.put(args.getVolumeName().getBytes(encoding), - newVInfo.toDBString().getBytes(encoding)); - - } catch (IOException ex) { - throw ErrorTable.newError(ErrorTable.SERVER_ERROR, args, ex); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Updates the Volume properties like Owner Name and Quota. - * - * @param args - Volume Args - * @param property - Flag which tells us what property to upgrade - * @throws OzoneException - */ - public void setVolumeProperty(VolumeArgs args, VolumeProperty property) - throws OzoneException { - lock.writeLock().lock(); - try { - byte[] volumeInfo = - metadataDB.get(args.getVolumeName().getBytes(encoding)); - if (volumeInfo == null) { - throw ErrorTable.newError(ErrorTable.VOLUME_NOT_FOUND, args); - } - VolumeInfo info = VolumeInfo.parse(new String(volumeInfo, encoding)); - - byte[] userBytes = userDB.get(args.getResourceName().getBytes(encoding)); - ListVolumes volumeList; - if (userBytes == null) { - volumeList = new ListVolumes(); - } else { - volumeList = ListVolumes.parse(new String(userBytes, encoding)); - } - - switch (property) { - case OWNER: - // needs new owner, we delete the volume object from the - // old user's volume list - removeOldOwner(info); - VolumeOwner owner = new VolumeOwner(args.getUserName()); - // set the new owner - info.setOwner(owner); - break; - case QUOTA: - // if this is quota update we just remove the old object from the - // current users list and update the same object later. - volumeList.getVolumes().remove(info); - info.setQuota(args.getQuota()); - break; - default: - OzoneException ozEx = - ErrorTable.newError(ErrorTable.BAD_PROPERTY, args); - ozEx.setMessage("Volume property is not recognized"); - throw ozEx; - } - - volumeList.addVolume(info); - - metadataDB.put(args.getVolumeName().getBytes(encoding), - info.toDBString().getBytes(encoding)); - - // if this is an owner change this put will create a new owner or update - // the owner's volume list. 
- userDB.put(args.getResourceName().getBytes(encoding), - volumeList.toDBString().getBytes(encoding)); - - } catch (IOException ex) { - throw ErrorTable.newError(ErrorTable.SERVER_ERROR, args, ex); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Removes the old owner from the volume. - * - * @param info - VolumeInfo - * @throws IOException - */ - private void removeOldOwner(VolumeInfo info) throws IOException { - // We need to look the owner that we know is the current owner - byte[] volumeBytes = - userDB.get(info.getOwner().getName().getBytes(encoding)); - ListVolumes volumeList = - ListVolumes.parse(new String(volumeBytes, encoding)); - volumeList.getVolumes().remove(info); - - // Write the new list info to the old user data - userDB.put(info.getOwner().getName().getBytes(encoding), - volumeList.toDBString().getBytes(encoding)); - } - - /** - * Checks if you are the owner of a specific volume. - * - * @param volume - Volume Name whose access permissions needs to be checked - * @param acl - requested acls which needs to be checked for access - * @return - True if you are the owner, false otherwise - * @throws OzoneException - */ - public boolean checkVolumeAccess(String volume, OzoneAcl acl) - throws OzoneException { - lock.readLock().lock(); - try { - byte[] volumeInfo = - metadataDB.get(volume.getBytes(encoding)); - if (volumeInfo == null) { - throw ErrorTable.newError(ErrorTable.VOLUME_NOT_FOUND, null); - } - - VolumeInfo info = VolumeInfo.parse(new String(volumeInfo, encoding)); - return info.getOwner().getName().equals(acl.getName()); - } catch (IOException ex) { - throw ErrorTable.newError(ErrorTable.SERVER_ERROR, null, ex); - } finally { - lock.readLock().unlock(); - } - } - - /** - * getVolumeInfo returns the Volume Info of a specific volume. - * - * @param args - Volume args - * @return VolumeInfo - * @throws OzoneException - */ - public VolumeInfo getVolumeInfo(VolumeArgs args) throws OzoneException { - lock.readLock().lock(); - try { - byte[] volumeInfo = - metadataDB.get(args.getVolumeName().getBytes(encoding)); - if (volumeInfo == null) { - throw ErrorTable.newError(ErrorTable.VOLUME_NOT_FOUND, args); - } - - return VolumeInfo.parse(new String(volumeInfo, encoding)); - } catch (IOException ex) { - throw ErrorTable.newError(ErrorTable.SERVER_ERROR, args, ex); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Returns all the volumes owned by a specific user. - * - * @param args - User Args - * @return - ListVolumes - * @throws OzoneException - */ - public ListVolumes listVolumes(ListArgs args) throws OzoneException { - lock.readLock().lock(); - try { - if (args.isRootScan()) { - return listAllVolumes(args); - } - - UserArgs uArgs = (UserArgs) args.getArgs(); - byte[] volumeList = userDB.get(uArgs.getUserName().getBytes(encoding)); - if (volumeList == null) { - throw ErrorTable.newError(ErrorTable.USER_NOT_FOUND, uArgs); - } - - String prefix = args.getPrefix(); - int maxCount = args.getMaxKeys(); - String prevKey = args.getPrevKey(); - if (prevKey != null) { - // Format is username/volumeName, in local mode we don't use the - // user name since we have a userName DB. 
- String[] volName = args.getPrevKey().split("/"); - if (volName.length < 2) { - throw ErrorTable.newError(ErrorTable.USER_NOT_FOUND, uArgs); - } - prevKey = volName[1]; - } - return getFilteredVolumes(volumeList, prefix, prevKey, maxCount); - } catch (IOException ex) { - throw ErrorTable.newError(ErrorTable.SERVER_ERROR, args.getArgs(), ex); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Returns a List of Volumes that meet the prefix, prevkey and maxCount - * constraints. - * - * @param volumeList - Byte Array of Volume Info. - * @param prefix - prefix string. - * @param prevKey - PrevKey - * @param maxCount - Maximum Count. - * @return ListVolumes. - * @throws IOException - */ - private ListVolumes getFilteredVolumes(byte[] volumeList, String prefix, - String prevKey, int maxCount) throws - IOException { - ListVolumes volumes = ListVolumes.parse(new String(volumeList, - encoding)); - int currentCount = 0; - ListIterator iter = volumes.getVolumes().listIterator(); - ListVolumes filteredVolumes = new ListVolumes(); - while (currentCount < maxCount && iter.hasNext()) { - VolumeInfo vInfo = iter.next(); - if (isMatchingPrefix(prefix, vInfo) && isAfterKey(prevKey, vInfo)) { - filteredVolumes.addVolume(vInfo); - currentCount++; - } - } - return filteredVolumes; - } - - /** - * Returns all volumes in a cluster. - * - * @param args - ListArgs. - * @return ListVolumes. - * @throws OzoneException - */ - public ListVolumes listAllVolumes(ListArgs args) - throws OzoneException, IOException { - String prefix = args.getPrefix(); - final String prevKey; - int maxCount = args.getMaxKeys(); - String userName = null; - - if (args.getPrevKey() != null) { - // Format is username/volumeName - String[] volName = args.getPrevKey().split("/"); - if (volName.length < 2) { - throw ErrorTable.newError(ErrorTable.USER_NOT_FOUND, args.getArgs()); - } - - byte[] userNameBytes = userDB.get(volName[0].getBytes(encoding)); - userName = new String(userNameBytes, encoding); - prevKey = volName[1]; - } else { - userName = new String(userDB.peekAround(0, null).getKey(), encoding); - prevKey = null; - } - - if (userName == null || userName.isEmpty()) { - throw ErrorTable.newError(ErrorTable.USER_NOT_FOUND, args.getArgs()); - } - - ListVolumes returnSet = new ListVolumes(); - // we need to iterate through users until we get maxcount volumes - // or no more volumes are left. - userDB.iterate(null, (key, value) -> { - int currentSize = returnSet.getVolumes().size(); - if (currentSize < maxCount) { - String name = new String(key, encoding); - byte[] volumeList = userDB.get(name.getBytes(encoding)); - if (volumeList == null) { - throw new IOException( - ErrorTable.newError(ErrorTable.USER_NOT_FOUND, args.getArgs())); - } - returnSet.getVolumes().addAll( - getFilteredVolumes(volumeList, prefix, prevKey, - maxCount - currentSize).getVolumes()); - return true; - } else { - return false; - } - }); - - return returnSet; - } - - /** - * Checks if a name starts with a matching prefix. - * - * @param prefix - prefix string. - * @param vInfo - volume info. - * @return true or false. - */ - private boolean isMatchingPrefix(String prefix, VolumeInfo vInfo) { - if (prefix == null || prefix.isEmpty()) { - return true; - } - return vInfo.getVolumeName().startsWith(prefix); - } - - /** - * Checks if the key is after the prevKey. - * - * @param prevKey - String prevKey. - * @param vInfo - volume Info. - * @return - true or false. 
- */ - private boolean isAfterKey(String prevKey, VolumeInfo vInfo) { - if (prevKey == null || prevKey.isEmpty()) { - return true; - } - return prevKey.compareTo(vInfo.getVolumeName()) < 0; - } - - /** - * Deletes a volume if it exists and is empty. - * - * @param args - volume args - * @throws OzoneException - */ - public void deleteVolume(VolumeArgs args) throws OzoneException { - lock.writeLock().lock(); - try { - byte[] volumeName = - metadataDB.get(args.getVolumeName().getBytes(encoding)); - if (volumeName == null) { - throw ErrorTable.newError(ErrorTable.VOLUME_NOT_FOUND, args); - } - - VolumeInfo vInfo = VolumeInfo.parse(new String(volumeName, encoding)); - - // Only remove volumes if they are empty. - if (vInfo.getBucketCount() > 0) { - throw ErrorTable.newError(ErrorTable.VOLUME_NOT_EMPTY, args); - } - - ListVolumes volumeList; - String user = vInfo.getOwner().getName(); - byte[] userVolumes = userDB.get(user.getBytes(encoding)); - if (userVolumes == null) { - throw ErrorTable.newError(ErrorTable.VOLUME_NOT_FOUND, args); - } - - volumeList = ListVolumes.parse(new String(userVolumes, encoding)); - volumeList.getVolumes().remove(vInfo); - - metadataDB.delete(args.getVolumeName().getBytes(encoding)); - userDB.put(user.getBytes(encoding), - volumeList.toDBString().getBytes(encoding)); - } catch (IOException ex) { - throw ErrorTable.newError(ErrorTable.SERVER_ERROR, args, ex); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Create a bucket if it does not exist. - * - * @param args - BucketArgs - * @throws OzoneException - */ - public void createBucket(BucketArgs args) throws OzoneException { - lock.writeLock().lock(); - try { - // check if volume exists, buckets cannot be created without volumes - byte[] volumeName = metadataDB.get(args.getVolumeName() - .getBytes(encoding)); - if (volumeName == null) { - throw ErrorTable.newError(ErrorTable.VOLUME_NOT_FOUND, args); - } - - // A resource name is volume/bucket -- That is the key in metadata table - byte[] bucketName = metadataDB.get(args.getResourceName() - .getBytes(encoding)); - if (bucketName != null) { - throw ErrorTable.newError(ErrorTable.BUCKET_ALREADY_EXISTS, args); - } - - BucketInfo bucketInfo = - new BucketInfo(args.getVolumeName(), args.getBucketName()); - - if (args.getRemoveAcls() != null) { - OzoneException ex = ErrorTable.newError(ErrorTable.MALFORMED_ACL, args); - ex.setMessage("Remove ACLs specified in bucket create. 
Please remove " - + "them and retry."); - throw ex; - } - - VolumeInfo volInfo = VolumeInfo.parse(new String(volumeName, encoding)); - volInfo.setBucketCount(volInfo.getBucketCount() + 1); - - bucketInfo.setAcls(args.getAddAcls()); - bucketInfo.setStorageType(args.getStorageType()); - bucketInfo.setVersioning(args.getVersioning()); - ListBuckets bucketList; - - // get bucket list from user/volume -> bucketList - byte[] volumeBuckets = userDB.get(args.getParentName() - .getBytes(encoding)); - if (volumeBuckets == null) { - bucketList = new ListBuckets(); - } else { - bucketList = ListBuckets.parse(new String(volumeBuckets, encoding)); - } - - bucketList.addBucket(bucketInfo); - bucketList.sort(); - - // Update Volume->bucketCount - userDB.put(args.getVolumeName().getBytes(encoding), - volInfo.toDBString().getBytes(encoding)); - - // Now update the userDB with user/volume -> bucketList - userDB.put(args.getParentName().getBytes(encoding), - bucketList.toDBString().getBytes(encoding)); - - // Update userDB with volume/bucket -> empty key list - userDB.put(args.getResourceName().getBytes(encoding), - new ListKeys().toDBString().getBytes(encoding)); - - // and update the metadataDB with volume/bucket->BucketInfo - metadataDB.put(args.getResourceName().getBytes(encoding), - bucketInfo.toDBString().getBytes(encoding)); - - } catch (IOException ex) { - throw ErrorTable.newError(ErrorTable.SERVER_ERROR, args, ex); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Updates the Bucket properties like ACls and Storagetype. - * - * @param args - Bucket Args - * @param property - Flag which tells us what property to upgrade - * @throws OzoneException - */ - public void setBucketProperty(BucketArgs args, BucketProperty property) - throws OzoneException { - - lock.writeLock().lock(); - try { - // volume/bucket-> bucketInfo - byte[] bucketInfo = metadataDB.get(args.getResourceName(). - getBytes(encoding)); - if (bucketInfo == null) { - throw ErrorTable.newError(ErrorTable.INVALID_BUCKET_NAME, args); - } - - BucketInfo info = BucketInfo.parse(new String(bucketInfo, encoding)); - byte[] volumeBuckets = userDB.get(args.getParentName() - .getBytes(encoding)); - ListBuckets bucketList = ListBuckets.parse(new String(volumeBuckets, - encoding)); - bucketList.getBuckets().remove(info); - - switch (property) { - case ACLS: - processRemoveAcls(args, info); - processAddAcls(args, info); - break; - case STORAGETYPE: - info.setStorageType(args.getStorageType()); - break; - case VERSIONING: - info.setVersioning(args.getVersioning()); - break; - default: - OzoneException ozEx = - ErrorTable.newError(ErrorTable.BAD_PROPERTY, args); - ozEx.setMessage("Bucket property is not recognized."); - throw ozEx; - } - - bucketList.addBucket(info); - metadataDB.put(args.getResourceName().getBytes(encoding), - info.toDBString().getBytes(encoding)); - - userDB.put(args.getParentName().getBytes(encoding), - bucketList.toDBString().getBytes(encoding)); - } catch (IOException ex) { - throw ErrorTable.newError(ErrorTable.SERVER_ERROR, args, ex); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Process Remove Acls and remove them from the bucket. 
- * - * @param args - BucketArgs - * @param info - BucketInfo - */ - private void processRemoveAcls(BucketArgs args, BucketInfo info) { - List removeAcls = args.getRemoveAcls(); - if ((removeAcls == null) || (info.getAcls() == null)) { - return; - } - for (OzoneAcl racl : args.getRemoveAcls()) { - ListIterator aclIter = info.getAcls().listIterator(); - while (aclIter.hasNext()) { - if (racl.equals(aclIter.next())) { - aclIter.remove(); - break; - } - } - } - } - - /** - * Process Add Acls and Add them to the bucket. - * - * @param args - BucketArgs - * @param info - BucketInfo - */ - private void processAddAcls(BucketArgs args, BucketInfo info) { - List addAcls = args.getAddAcls(); - if ((addAcls == null)) { - return; - } - - if (info.getAcls() == null) { - info.setAcls(addAcls); - return; - } - - for (OzoneAcl newacl : addAcls) { - ListIterator aclIter = info.getAcls().listIterator(); - while (aclIter.hasNext()) { - if (newacl.equals(aclIter.next())) { - continue; - } - } - info.getAcls().add(newacl); - } - } - - /** - * Deletes a given bucket. - * - * @param args - BucketArgs - * @throws OzoneException - */ - public void deleteBucket(BucketArgs args) throws OzoneException { - lock.writeLock().lock(); - try { - byte[] bucketInfo = metadataDB.get(args.getResourceName() - .getBytes(encoding)); - if (bucketInfo == null) { - throw ErrorTable.newError(ErrorTable.INVALID_BUCKET_NAME, args); - } - - BucketInfo bInfo = BucketInfo.parse(new String(bucketInfo, encoding)); - - // Only remove buckets if they are empty. - if (bInfo.getKeyCount() > 0) { - throw ErrorTable.newError(ErrorTable.BUCKET_NOT_EMPTY, args); - } - - byte[] bucketBytes = userDB.get(args.getParentName().getBytes(encoding)); - if (bucketBytes == null) { - throw ErrorTable.newError(ErrorTable.INVALID_BUCKET_NAME, args); - } - - ListBuckets bucketList = - ListBuckets.parse(new String(bucketBytes, encoding)); - bucketList.getBuckets().remove(bInfo); - - metadataDB.delete(args.getResourceName().getBytes(encoding)); - userDB.put(args.getParentName().getBytes(encoding), - bucketList.toDBString().getBytes(encoding)); - } catch (IOException ex) { - throw ErrorTable.newError(ErrorTable.SERVER_ERROR, args, ex); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Returns the Bucket info for a given bucket. - * - * @param args - Bucket Args - * @return BucketInfo - Bucket Information - * @throws OzoneException - */ - public BucketInfo getBucketInfo(BucketArgs args) throws OzoneException { - lock.readLock().lock(); - try { - byte[] bucketBytes = metadataDB.get(args.getResourceName() - .getBytes(encoding)); - if (bucketBytes == null) { - throw ErrorTable.newError(ErrorTable.INVALID_BUCKET_NAME, args); - } - - return BucketInfo.parse(new String(bucketBytes, encoding)); - } catch (IOException ex) { - throw ErrorTable.newError(ErrorTable.SERVER_ERROR, args, ex); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Returns a list of buckets for a given volume. 
- * - * @param args - volume args - * @return List of buckets - * @throws OzoneException - */ - public ListBuckets listBuckets(ListArgs args) throws OzoneException { - lock.readLock().lock(); - try { - Preconditions.checkState(args.getArgs() instanceof VolumeArgs); - VolumeArgs vArgs = (VolumeArgs) args.getArgs(); - String userVolKey = vArgs.getUserName() + "/" + vArgs.getVolumeName(); - - // TODO : Query using Prefix and PrevKey - byte[] bucketBytes = userDB.get(userVolKey.getBytes(encoding)); - if (bucketBytes == null) { - throw ErrorTable.newError(ErrorTable.INVALID_VOLUME_NAME, - args.getArgs()); - } - return ListBuckets.parse(new String(bucketBytes, encoding)); - } catch (IOException ex) { - throw ErrorTable.newError(ErrorTable.SERVER_ERROR, args.getArgs(), ex); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Creates a key and returns a stream to which this key can be written to. - * - * @param args KeyArgs - * @return - A stream into which key can be written to. - * @throws OzoneException - */ - public OutputStream createKey(KeyArgs args) throws OzoneException { - lock.writeLock().lock(); - try { - String fileNameHash = DigestUtils.sha256Hex(args.getResourceName()); - - // Please don't try trillion objects unless the physical file system - // is capable of doing that in a single directory. - - String fullPath = storageRoot + OBJECT_DIR + fileNameHash; - File f = new File(fullPath); - - // In real ozone it would not be this way, a file will be overwritten - // only if the upload is successful. - if (f.exists()) { - LOG.debug("we are overwriting a file. This is by design."); - if (!f.delete()) { - LOG.error("Unable to delete the file: {}", fullPath); - throw ErrorTable.newError(ErrorTable.SERVER_ERROR, args); - } - } - - // f.createNewFile(); - FileOutputStream fsStream = new FileOutputStream(f); - inProgressObjects.put(fsStream, fullPath); - - return fsStream; - } catch (IOException e) { - throw ErrorTable.newError(ErrorTable.SERVER_ERROR, args, e); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * commit keys moves an In progress object into the metadata store so that key - * is visible in the metadata operations from that point onwards. - * - * @param args Object args - * @throws OzoneException - */ - public void commitKey(KeyArgs args, OutputStream stream) - throws OzoneException { - SimpleDateFormat format = - new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US); - lock.writeLock().lock(); - - try { - byte[] bucketInfo = metadataDB.get(args.getParentName() - .getBytes(encoding)); - if (bucketInfo == null) { - throw ErrorTable.newError(ErrorTable.INVALID_RESOURCE_NAME, args); - } - BucketInfo bInfo = BucketInfo.parse(new String(bucketInfo, encoding)); - bInfo.setKeyCount(bInfo.getKeyCount() + 1); - - String fileNameHash = inProgressObjects.get(stream); - inProgressObjects.remove(stream); - if (fileNameHash == null) { - throw ErrorTable.newError(ErrorTable.SERVER_ERROR, args); - } - - ListKeys keyList; - byte[] bucketListBytes = userDB.get(args.getParentName() - .getBytes(encoding)); - keyList = ListKeys.parse(new String(bucketListBytes, encoding)); - KeyInfo keyInfo; - - byte[] objectBytes = metadataDB.get(args.getResourceName() - .getBytes(encoding)); - - if (objectBytes != null) { - // we are overwriting an existing object. 
- // TODO : Emit info for Accounting - keyInfo = KeyInfo.parse(new String(objectBytes, encoding)); - keyList.getKeyList().remove(keyInfo); - } else { - keyInfo = new KeyInfo(); - } - - keyInfo.setCreatedOn(format.format(new Date(System.currentTimeMillis()))); - - // TODO : support version, we need to check if versioning - // is switched on the bucket and make appropriate calls. - keyInfo.setVersion(0); - - keyInfo.setDataFileName(fileNameHash); - keyInfo.setKeyName(args.getKeyName()); - keyInfo.setMd5hash(args.getHash()); - keyInfo.setSize(args.getSize()); - - keyList.getKeyList().add(keyInfo); - - // if the key exists, we overwrite happily :). since the - // earlier call - createObject - has overwritten the data. - - metadataDB.put(args.getResourceName().getBytes(encoding), - keyInfo.toDBString().getBytes(encoding)); - - metadataDB.put(args.getParentName().getBytes(encoding), - bInfo.toDBString().getBytes(encoding)); - - userDB.put(args.getParentName().getBytes(encoding), - keyList.toDBString().getBytes(encoding)); - - } catch (IOException e) { - throw ErrorTable.newError(ErrorTable.SERVER_ERROR, args, e); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * deletes an key from a given bucket. - * - * @param args - ObjectArgs - * @throws OzoneException - */ - public void deleteKey(KeyArgs args) throws OzoneException { - lock.writeLock().lock(); - try { - byte[] bucketInfo = metadataDB.get(args.getParentName() - .getBytes(encoding)); - if (bucketInfo == null) { - throw ErrorTable.newError(ErrorTable.INVALID_BUCKET_NAME, args); - } - BucketInfo bInfo = BucketInfo.parse(new String(bucketInfo, encoding)); - bInfo.setKeyCount(bInfo.getKeyCount() - 1); - - - byte[] bucketListBytes = userDB.get(args.getParentName() - .getBytes(encoding)); - if (bucketListBytes == null) { - throw ErrorTable.newError(ErrorTable.INVALID_BUCKET_NAME, args); - } - ListKeys keyList = ListKeys.parse(new String(bucketListBytes, encoding)); - - - byte[] objectBytes = metadataDB.get(args.getResourceName() - .getBytes(encoding)); - if (objectBytes == null) { - throw ErrorTable.newError(ErrorTable.INVALID_KEY, args); - } - - KeyInfo oInfo = KeyInfo.parse(new String(objectBytes, encoding)); - keyList.getKeyList().remove(oInfo); - - String fileNameHash = DigestUtils.sha256Hex(args.getResourceName()); - - String fullPath = storageRoot + OBJECT_DIR + fileNameHash; - File f = new File(fullPath); - - if (f.exists()) { - if (!f.delete()) { - throw ErrorTable.newError(ErrorTable.KEY_OPERATION_CONFLICT, args); - } - } else { - throw ErrorTable.newError(ErrorTable.INVALID_KEY, args); - } - - - metadataDB.delete(args.getResourceName().getBytes(encoding)); - metadataDB.put(args.getParentName().getBytes(encoding), - bInfo.toDBString().getBytes(encoding)); - userDB.put(args.getParentName().getBytes(encoding), - keyList.toDBString().getBytes(encoding)); - } catch (IOException e) { - throw ErrorTable.newError(ErrorTable.SERVER_ERROR, args, e); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Returns a Stream for the file. 
- /**
- * Returns a stream for the file.
- *
- * @param args - Object args
- * @return Stream
- * @throws IOException
- * @throws OzoneException
- */
- public LengthInputStream newKeyReader(KeyArgs args)
- throws IOException, OzoneException {
- lock.readLock().lock();
- try {
- String fileNameHash = DigestUtils.sha256Hex(args.getResourceName());
- String fullPath = storageRoot + OBJECT_DIR + fileNameHash;
- File f = new File(fullPath);
- if (!f.exists()) {
- throw ErrorTable.newError(ErrorTable.INVALID_RESOURCE_NAME, args);
- }
- long size = f.length();
-
- FileInputStream fileStream = new FileInputStream(f);
- return new LengthInputStream(fileStream, size);
- } finally {
- lock.readLock().unlock();
- }
- }
-
- /**
- * Returns keys in a bucket.
- *
- * @param args - list args
- * @return List of keys.
- * @throws IOException
- * @throws OzoneException
- */
- public ListKeys listKeys(ListArgs args) throws IOException, OzoneException {
- lock.readLock().lock();
- // TODO : Support Prefix and PrevKey lookup.
- try {
- Preconditions.checkState(args.getArgs() instanceof BucketArgs);
- BucketArgs bArgs = (BucketArgs) args.getArgs();
- byte[] bucketInfo = metadataDB.get(bArgs.getResourceName()
- .getBytes(encoding));
- if (bucketInfo == null) {
- throw ErrorTable.newError(ErrorTable.INVALID_RESOURCE_NAME, bArgs);
- }
-
- byte[] bucketListBytes = userDB.get(bArgs.getResourceName()
- .getBytes(encoding));
- if (bucketListBytes == null) {
- throw ErrorTable.newError(ErrorTable.INVALID_RESOURCE_NAME, bArgs);
- }
- return ListKeys.parse(new String(bucketListBytes, encoding));
- } finally {
- lock.readLock().unlock();
- }
- }
-
- /**
- * Get the Key information for a given key.
- *
- * @param args - Key Args
- * @return KeyInfo - Key Information
- * @throws IOException
- * @throws OzoneException
- */
- public KeyInfo getKeyInfo(KeyArgs args) throws IOException, OzoneException {
- lock.readLock().lock();
- try {
- byte[] bucketInfo = metadataDB
- .get(args.getParentName().getBytes(encoding));
- if (bucketInfo == null) {
- throw ErrorTable.newError(ErrorTable.INVALID_BUCKET_NAME, args);
- }
-
- byte[] bucketListBytes = userDB
- .get(args.getParentName().getBytes(encoding));
- if (bucketListBytes == null) {
- throw ErrorTable.newError(ErrorTable.INVALID_BUCKET_NAME, args);
- }
-
- byte[] objectBytes = metadataDB
- .get(args.getResourceName().getBytes(encoding));
- if (objectBytes == null) {
- throw ErrorTable.newError(ErrorTable.INVALID_KEY, args);
- }
-
- return KeyInfo.parse(new String(objectBytes, encoding));
- } catch (IOException e) {
- throw ErrorTable.newError(ErrorTable.SERVER_ERROR, args, e);
- } finally {
- lock.readLock().unlock();
- }
- }
-
- /**
- * This is used in updates to volume metadata.
- */
- public enum VolumeProperty {
- OWNER, QUOTA
- }
-
- /**
- * Bucket Properties.
- */
- public enum BucketProperty {
- ACLS, STORAGETYPE, VERSIONING
- }
-}
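
The deleted newKeyReader above returns the data as a LengthInputStream, pairing the open FileInputStream with the file's length so callers can set Content-Length without reading the stream twice. A self-contained sketch of that wrapper shape using only the JDK; SizedInputStream is a hypothetical stand-in, not the LengthInputStream class referenced in the patch:

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FilterInputStream;
import java.io.InputStream;

// Illustrative: an InputStream that also carries the number of bytes
// it will produce, captured before streaming begins.
public class SizedInputStream extends FilterInputStream {
  private final long length;

  public SizedInputStream(InputStream in, long length) {
    super(in);
    this.length = length;
  }

  public long getLength() {
    return length;
  }

  public static SizedInputStream open(File f) throws FileNotFoundException {
    // Record the length up front, as newKeyReader did with f.length().
    return new SizedInputStream(new FileInputStream(f), f.length());
  }
}
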
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java index ad21f28ec4..d3bc857632 100644 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java +++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java @@ -57,8 +57,7 @@ public class TestOzoneFSInputStream { /** * Create a MiniDFSCluster for testing. *
- * Ozone is made active by setting OZONE_ENABLED = true and - * OZONE_HANDLER_TYPE_KEY = "distributed" + * Ozone is made active by setting OZONE_ENABLED = true * * @throws IOException */ @@ -90,9 +89,6 @@ public class TestOzoneFSInputStream { // Fetch the host and port for File System init DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0) .getDatanodeDetails(); - int port = datanodeDetails - .getPort(DatanodeDetails.Port.Name.REST).getValue(); - String host = datanodeDetails.getHostName(); // Set the fs.defaultFS and start the filesystem String uri = String.format("%s://%s.%s/",