diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java index 36d953c960..43e6fe79b7 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java @@ -47,6 +47,8 @@ public OzoneConfiguration() { public OzoneConfiguration(Configuration conf) { super(conf); + //load the configuration from the classloader of the original conf. + setClassLoader(conf.getClassLoader()); } public List readPropertyFromXml(URL url) throws JAXBException { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index e9a52f8aae..91f53f3c51 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -371,6 +371,13 @@ public final class OzoneConfigKeys { public static final boolean OZONE_ACL_ENABLED_DEFAULT = false; + //For technical reasons this is unused and hardcoded to the + // OzoneFileSystem.initialize. + public static final String OZONE_FS_ISOLATED_CLASSLOADER = + "ozone.fs.isolated-classloader"; + + + /** * There is no need to instantiate this class. */ diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 54899360b8..7ba15f3a53 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -1806,4 +1806,19 @@ not be renewed. + + + ozone.fs.isolated-classloader + + OZONE, OZONEFS + + Enable it for older hadoops to separate the classloading of all the + Ozone classes. 
With 'true' value, ozonefs can be used with older + hadoop versions as the hadoop3/ozone related classes are loaded by + an isolated classloader. + + Default depends from the used jar. true for ozone-filesystem-lib-legacy + jar and false for the ozone-filesystem-lib.jar + + \ No newline at end of file diff --git a/hadoop-hdds/docs/content/OzoneFS.md b/hadoop-hdds/docs/content/OzoneFS.md index 92c83d81ae..b7f8a74229 100644 --- a/hadoop-hdds/docs/content/OzoneFS.md +++ b/hadoop-hdds/docs/content/OzoneFS.md @@ -56,12 +56,11 @@ This will make this bucket to be the default file system for HDFS dfs commands a You also need to add the ozone-filesystem.jar file to the classpath: {{< highlight bash >}} -export HADOOP_CLASSPATH=/opt/ozone/share/hadoop/ozonefs/hadoop-ozone-filesystem.jar:$HADOOP_CLASSPATH +export HADOOP_CLASSPATH=/opt/ozone/share/ozonefs/lib/hadoop-ozone-filesystem-lib-.*.jar:$HADOOP_CLASSPATH {{< /highlight >}} - Once the default Filesystem has been setup, users can run commands like ls, put, mkdir, etc. For example, @@ -78,3 +77,19 @@ hdfs dfs -mkdir /users Or put command etc. In other words, all programs like Hive, Spark, and Distcp will work against this file system. Please note that any keys created/deleted in the bucket using methods apart from OzoneFileSystem will show up as diectories and files in the Ozone File System. + +## Legacy mode + +There are two ozonefs files which includes all the dependencies: + + * share/ozone/lib/hadoop-ozone-filesystem-lib-VERSION.jar + * share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-VERSION.jar + + The first one contains all the required dependency to use ozonefs with a + compatible hadoop version (hadoop 3.2 / 3.1). + + The second one contains all the dependency in an internal, separated directory, + and a special class loader is used to load all the classes from the location. + + With this method the hadoop-ozone-filesystem-lib-legacy.jar can be used from + any older hadoop version (eg. 
hadoop 2.7 or spark+hadoop 2.7) \ No newline at end of file diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching index 250a089984..dc2819b6f8 100755 --- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching +++ b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching @@ -106,10 +106,6 @@ run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/workers.sh" "sb run cp "${ROOT}/hadoop-ozone/common/src/main/bin/start-ozone.sh" "sbin/" run cp "${ROOT}/hadoop-ozone/common/src/main/bin/stop-ozone.sh" "sbin/" -#shaded ozonefs -run mkdir -p "./share/hadoop/ozonefs" -run cp "${ROOT}/hadoop-ozone/ozonefs/target/hadoop-ozone-filesystem-${HDDS_VERSION}.jar" "./share/hadoop/ozonefs/hadoop-ozone-filesystem-${HDDS_VERSION}.jar" - #shaded datanode service run mkdir -p "./share/hadoop/ozoneplugin" run cp "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}-plugin.jar" "./share/hadoop/ozoneplugin/hadoop-ozone-datanode-plugin-${HDDS_VERSION}.jar" diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index f625cb1526..6224e9d072 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -82,6 +82,13 @@ classpath hadoop-ozone-tools.classpath + + org.apache.hadoop + hadoop-ozone-filesystem + ${ozone.version} + classpath + hadoop-ozone-filesystem.classpath + org.apache.hadoop hadoop-ozone-common @@ -183,6 +190,14 @@ org.apache.hadoop hadoop-ozone-tools + + org.apache.hadoop + hadoop-ozone-filesystem-lib + + + org.apache.hadoop + hadoop-ozone-filesystem-lib-legacy + org.apache.hadoop hadoop-ozone-common diff --git a/hadoop-ozone/dist/src/main/compose/ozonefs/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonefs/docker-compose.yaml index f155b3bf40..22055fce5b 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonefs/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozonefs/docker-compose.yaml @@ -19,12 +19,12 @@ 
services: datanode: image: apache/hadoop-runner volumes: - - ../..:/opt/hadoop + - ../..:/opt/hadoop ports: - - 9864 + - 9864 command: ["/opt/hadoop/bin/ozone","datanode"] env_file: - - ./docker-config + - ./docker-config ozoneManager: image: apache/hadoop-runner hostname: ozoneManager @@ -36,7 +36,7 @@ services: ENSURE_OM_INITIALIZED: /data/metadata/ozoneManager/current/VERSION WAITFOR: scm:9876 env_file: - - ./docker-config + - ./docker-config command: ["/opt/hadoop/bin/ozone","om"] scm: image: apache/hadoop-runner @@ -45,16 +45,25 @@ services: ports: - 9876 env_file: - - ./docker-config + - ./docker-config environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION + ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION command: ["/opt/hadoop/bin/ozone","scm"] - hadooplast: + hadoop3: image: flokkr/hadoop:3.1.0 volumes: - - ../..:/opt/ozone + - ../..:/opt/ozone env_file: - - ./docker-config + - ./docker-config environment: - HADOOP_CLASSPATH: /opt/ozone/share/hadoop/ozonefs/*.jar + HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-0*.jar + command: ["watch","-n","100000","ls"] + hadoop2: + image: flokkr/hadoop:2.9.0 + volumes: + - ../..:/opt/ozone + env_file: + - ./docker-config + environment: + HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy*.jar command: ["watch","-n","100000","ls"] diff --git a/hadoop-ozone/ozonefs-lib-legacy/pom.xml b/hadoop-ozone/ozonefs-lib-legacy/pom.xml new file mode 100644 index 0000000000..b4b7636724 --- /dev/null +++ b/hadoop-ozone/ozonefs-lib-legacy/pom.xml @@ -0,0 +1,104 @@ + + + + 4.0.0 + + org.apache.hadoop + hadoop-ozone + 0.4.0-SNAPSHOT + + hadoop-ozone-filesystem-lib-legacy + Apache Hadoop Ozone FileSystem Legacy Jar Library + This projects creates an uberjar from ozonefs with all the + dependencies, but the dependencies are located in an isolated subdir + and loaded by a custom class loader. 
Can be used together with Hadoop 2.x + + jar + 0.4.0-SNAPSHOT + + UTF-8 + true + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + + include-dependencies + + unpack-dependencies + + prepare-package + + target/classes/libs + compile + META-INF/*.SF + + slf4j-api,slf4j-log4j12,log4j-api,log4j-core,log4j,hadoop-ozone-filesystem + + + + + include-ozonefs + + unpack-dependencies + + prepare-package + + target/classes + hadoop-ozone-filesystem + compile + META-INF/*.SF + + + + + + org.codehaus.mojo + animal-sniffer-maven-plugin + + + signature-check + + + + + + + + + + org.apache.hadoop + hadoop-ozone-filesystem + compile + + + diff --git a/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/ozonefs.txt b/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/ozonefs.txt new file mode 100644 index 0000000000..85c13074a3 --- /dev/null +++ b/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/ozonefs.txt @@ -0,0 +1,21 @@ + + +Apache Hadoop Ozone placeholder file. + +The usage of the legacy version of the uber jar can be detected based on +the existence of this file. diff --git a/hadoop-ozone/ozonefs-lib/pom.xml b/hadoop-ozone/ozonefs-lib/pom.xml new file mode 100644 index 0000000000..c8c5c75086 --- /dev/null +++ b/hadoop-ozone/ozonefs-lib/pom.xml @@ -0,0 +1,89 @@ + + + + 4.0.0 + + org.apache.hadoop + hadoop-ozone + 0.4.0-SNAPSHOT + + hadoop-ozone-filesystem-lib + Apache Hadoop Ozone FileSystem Single Jar Library + jar + This projects creates an uber jar from ozonefs with all the + dependencies. 
+ + 0.4.0-SNAPSHOT + + UTF-8 + true + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + + + unpack-dependencies + + prepare-package + + target/classes + compile + META-INF/*.SF + + slf4j-api,slf4j-log4j12,log4j-api,log4j-core,log4j + + + + + + + org.codehaus.mojo + animal-sniffer-maven-plugin + + + signature-check + + + + + + + + + + org.apache.hadoop + hadoop-ozone-filesystem + compile + + + diff --git a/hadoop-ozone/ozonefs/pom.xml b/hadoop-ozone/ozonefs/pom.xml index 0bf1c3a15c..95a602c146 100644 --- a/hadoop-ozone/ozonefs/pom.xml +++ b/hadoop-ozone/ozonefs/pom.xml @@ -43,46 +43,6 @@ - - org.apache.maven.plugins - maven-shade-plugin - 3.1.1 - - - - com.google.guava:guava - org.slf4j:slf4j-api - com.google.protobuf:protobuf-java - com.nimbusds:nimbus-jose-jwt - com.github.stephenc.jcip:jcip-annotations - com.google.code.findbugs:jsr305 - org.apache.hadoop:hadoop-ozone-client - org.apache.hadoop:hadoop-hdds-client - org.apache.hadoop:hadoop-hdds-common - org.fusesource.leveldbjni:leveldbjni-all - org.apache.ratis:ratis-server - org.apache.ratis:ratis-proto-shaded - com.google.auto.value:auto-value-annotations - com.squareup:javapoet - org.jctools:jctools-core - org.apache.ratis:ratis-common - org.apache.ratis:ratis-client - org.apache.ratis:ratis-netty - org.apache.ratis:ratis-grpc - org.rocksdb:rocksdbjni - org.apache.hadoop:hadoop-ozone-common - - - - - - package - - shade - - - - org.apache.maven.plugins maven-dependency-plugin @@ -96,7 +56,7 @@ - ${project.basedir}/target/hadoop-tools-deps/${project.artifactId}.tools-optional.txt + ${project.basedir}/target/1hadoop-tools-deps/${project.artifactId}.tools-optional.txt @@ -110,7 +70,17 @@ ITestOzoneContract*.java - + + + org.codehaus.mojo + animal-sniffer-maven-plugin + + + signature-check + + + + @@ -118,17 +88,17 @@ org.apache.hadoop hadoop-common - provided + compile org.apache.hadoop hadoop-hdfs - provided + compile 
org.apache.hadoop hadoop-hdfs-client - provided + compile org.apache.hadoop diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicKeyInfo.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicKeyInfo.java new file mode 100644 index 0000000000..06ebc15295 --- /dev/null +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicKeyInfo.java @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.ozone; + +/** + * Minimum set of Ozone key information attributes. + *

+ * This class doesn't depend on any other ozone class just on primitive + * java types. It could be used easily in the signature of OzoneClientAdapter + * as even if a separated class loader is loaded it it won't cause any + * dependency problem. + */ +public class BasicKeyInfo { + + private String name; + + private long modificationTime; + + private long dataSize; + + public BasicKeyInfo(String name, long modificationTime, long size) { + this.name = name; + this.modificationTime = modificationTime; + this.dataSize = size; + } + + public String getName() { + return name; + } + + public long getModificationTime() { + return modificationTime; + } + + public long getDataSize() { + return dataSize; + } +} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java new file mode 100644 index 0000000000..462724521a --- /dev/null +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java @@ -0,0 +1,84 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.ozone; + +import java.net.URL; +import java.net.URLClassLoader; +import java.util.HashSet; +import java.util.Set; + +/** + * Class loader which delegates the loading only for the selected class. + * + *

+ * By default java classloader delegates first all the class loading to the + * parent, and loads the class only if it's not found in the class. + *

+ * This simple class loader do the opposit. Everything is loaded with this + * class loader without delegation _except_ the few classes which are defined + * in the constructor. + *

+ * With this method we can use two separated class loader (the original main + * classloader and instance of this which loaded separated classes, but the + * few selected classes are shared between the two class loaders. + *

+ * With this approach it's possible to use any older hadoop version + * (main classloader) together with ozonefs (instance of this classloader) as + * only the selected classes are selected between the class loaders. + */ +public class FilteredClassLoader extends URLClassLoader { + + private final ClassLoader systemClassLoader; + + private final ClassLoader delegate; + private Set delegatedClasses = new HashSet<>(); + + public FilteredClassLoader(URL[] urls, ClassLoader parent) { + super(urls, null); + delegatedClasses.add("org.apache.hadoop.fs.ozone.OzoneClientAdapter"); + delegatedClasses.add("org.apache.hadoop.fs.ozone.BasicKeyInfo"); + delegatedClasses.add("org.apache.hadoop.fs.ozone.OzoneFSOutputStream"); + delegatedClasses.add("org.apache.hadoop.fs.Seekable"); + this.delegate = parent; + systemClassLoader = getSystemClassLoader(); + + } + + @Override + public Class loadClass(String name) throws ClassNotFoundException { + if (delegatedClasses.contains(name) || + name.startsWith("org.apache.log4j") || + name.startsWith("org.slf4j")) { + return delegate.loadClass(name); + } + return super.loadClass(name); + } + + private Class loadFromSystem(String name) { + if (systemClassLoader != null) { + try { + return systemClassLoader.loadClass(name); + } catch (ClassNotFoundException ex) { + //no problem + return null; + } + } else { + return null; + } + } +} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java new file mode 100644 index 0000000000..59f3f7a97c --- /dev/null +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.ozone; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Iterator; + +/** + * Lightweight adapter to separte hadoop/ozone classes. + *

+ * This class contains only the bare minimum Ozone classes in the signature. + * It could be loaded by a different classloader because only the objects in + * the method signatures should be shared between the classloader. + */ +public interface OzoneClientAdapter { + + void close() throws IOException; + + InputStream createInputStream(String key) throws IOException; + + OzoneFSOutputStream createKey(String key) throws IOException; + + void renameKey(String key, String newKeyName) throws IOException; + + BasicKeyInfo getKeyInfo(String keyName); + + boolean isDirectory(BasicKeyInfo key); + + boolean createDirectory(String keyName); + + boolean deleteObject(String keyName); + + long getCreationTime(); + + boolean hasNextKey(String key); + + Iterator listKeys(String pathKey); + +} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterFactory.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterFactory.java new file mode 100644 index 0000000000..2aa816a526 --- /dev/null +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterFactory.java @@ -0,0 +1,119 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.ozone; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.util.ArrayList; +import java.util.Enumeration; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Creates OzoneClientAdapter with classloader separation. + */ +public final class OzoneClientAdapterFactory { + + static final Logger LOG = + LoggerFactory.getLogger(OzoneClientAdapterFactory.class); + + private OzoneClientAdapterFactory() { + } + + public static OzoneClientAdapter createAdapter( + String volumeStr, + String bucketStr) + throws IOException { + + ClassLoader currentClassLoader = OzoneFileSystem.class.getClassLoader(); + List urls = new ArrayList<>(); + + findEmbeddedLibsUrl(urls, currentClassLoader); + + findConfigDirUrl(urls, currentClassLoader); + + ClassLoader classLoader = + new FilteredClassLoader(urls.toArray(new URL[0]), currentClassLoader); + + try { + + ClassLoader contextClassLoader = + Thread.currentThread().getContextClassLoader(); + Thread.currentThread().setContextClassLoader(classLoader); + + //this class caches the context classloader during the first load + //call it here when the context class loader is set to the isoloated + //loader to make sure the grpc class will be loaded by the right + //loader + Class reflectionUtils = + classLoader.loadClass("org.apache.ratis.util.ReflectionUtils"); + reflectionUtils.getMethod("getClassByName", String.class) + .invoke(null, "org.apache.ratis.grpc.GrpcFactory"); + + OzoneClientAdapter ozoneClientAdapter = (OzoneClientAdapter) classLoader + 
.loadClass("org.apache.hadoop.fs.ozone.OzoneClientAdapterImpl") + .getConstructor(String.class, String.class) + .newInstance( + volumeStr, + bucketStr); + + Thread.currentThread().setContextClassLoader(contextClassLoader); + + return ozoneClientAdapter; + } catch (Exception e) { + LOG.error("Can't initialize the ozoneClientAdapter", e); + throw new IOException( + "Can't initialize the OzoneClientAdapter implementation", e); + } + + } + + private static void findConfigDirUrl(List urls, + ClassLoader currentClassLoader) throws IOException { + Enumeration conf = + currentClassLoader.getResources("ozone-site.xml"); + while (conf.hasMoreElements()) { + urls.add( + new URL( + conf.nextElement().toString().replace("ozone-site.xml", ""))); + + } + } + + private static void findEmbeddedLibsUrl(List urls, + ClassLoader currentClassloader) + throws MalformedURLException { + + //marker file is added to the jar to make it easier to find the URL + // for the current jar. + String markerFile = "ozonefs.txt"; + ClassLoader currentClassLoader = OzoneFileSystem.class.getClassLoader(); + + URL ozFs = currentClassLoader + .getResource(markerFile); + String rootPath = ozFs.toString().replace(markerFile, ""); + urls.add(new URL(rootPath)); + + urls.add(new URL(rootPath + "libs/")); + + } + +} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterImpl.java new file mode 100644 index 0000000000..23d32e2f79 --- /dev/null +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterImpl.java @@ -0,0 +1,235 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.ozone; + +import java.io.IOException; +import java.io.InputStream; +import java.util.HashMap; +import java.util.Iterator; + +import org.apache.hadoop.hdds.client.ReplicationFactor; +import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientFactory; +import org.apache.hadoop.ozone.client.OzoneKey; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; + +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Implementation of the OzoneFileSystem calls. 
+ */ +public class OzoneClientAdapterImpl implements OzoneClientAdapter { + + static final Logger LOG = + LoggerFactory.getLogger(OzoneClientAdapterImpl.class); + + private OzoneClient ozoneClient; + private ObjectStore objectStore; + private OzoneVolume volume; + private OzoneBucket bucket; + private ReplicationType replicationType; + private ReplicationFactor replicationFactor; + + public OzoneClientAdapterImpl(String volumeStr, String bucketStr) + throws IOException { + this(createConf(), volumeStr, bucketStr); + } + + private static OzoneConfiguration createConf() { + ClassLoader contextClassLoader = + Thread.currentThread().getContextClassLoader(); + Thread.currentThread().setContextClassLoader(null); + OzoneConfiguration conf = new OzoneConfiguration(); + Thread.currentThread().setContextClassLoader(contextClassLoader); + return conf; + } + + public OzoneClientAdapterImpl(OzoneConfiguration conf, String volumeStr, + String bucketStr) throws IOException { + ClassLoader contextClassLoader = + Thread.currentThread().getContextClassLoader(); + Thread.currentThread().setContextClassLoader(null); + try { + String replicationTypeConf = + conf.get(OzoneConfigKeys.OZONE_REPLICATION_TYPE, + OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT); + + int replicationCountConf = conf.getInt(OzoneConfigKeys.OZONE_REPLICATION, + OzoneConfigKeys.OZONE_REPLICATION_DEFAULT); + this.ozoneClient = + OzoneClientFactory.getRpcClient(conf); + objectStore = ozoneClient.getObjectStore(); + this.volume = objectStore.getVolume(volumeStr); + this.bucket = volume.getBucket(bucketStr); + this.replicationType = ReplicationType.valueOf(replicationTypeConf); + this.replicationFactor = ReplicationFactor.valueOf(replicationCountConf); + } finally { + Thread.currentThread().setContextClassLoader(contextClassLoader); + } + + } + + @Override + public void close() throws IOException { + ozoneClient.close(); + } + + @Override + public InputStream createInputStream(String key) throws IOException { + return 
bucket.readKey(key).getInputStream(); + } + + @Override + public OzoneFSOutputStream createKey(String key) throws IOException { + OzoneOutputStream ozoneOutputStream = + bucket.createKey(key, 0, replicationType, replicationFactor, + new HashMap<>()); + return new OzoneFSOutputStream(ozoneOutputStream.getOutputStream()); + } + + @Override + public void renameKey(String key, String newKeyName) throws IOException { + bucket.renameKey(key, newKeyName); + } + + /** + * Helper method to fetch the key metadata info. + * + * @param keyName key whose metadata information needs to be fetched + * @return metadata info of the key + */ + @Override + public BasicKeyInfo getKeyInfo(String keyName) { + try { + OzoneKey key = bucket.getKey(keyName); + return new BasicKeyInfo( + keyName, + key.getModificationTime(), + key.getDataSize() + ); + } catch (IOException e) { + LOG.trace("Key:{} does not exist", keyName); + return null; + } + } + + /** + * Helper method to check if an Ozone key is representing a directory. + * + * @param key key to be checked as a directory + * @return true if key is a directory, false otherwise + */ + @Override + public boolean isDirectory(BasicKeyInfo key) { + LOG.trace("key name:{} size:{}", key.getName(), + key.getDataSize()); + return key.getName().endsWith(OZONE_URI_DELIMITER) + && (key.getDataSize() == 0); + } + + /** + * Helper method to create an directory specified by key name in bucket. + * + * @param keyName key name to be created as directory + * @return true if the key is created, false otherwise + */ + @Override + public boolean createDirectory(String keyName) { + try { + LOG.trace("creating dir for key:{}", keyName); + bucket.createKey(keyName, 0, replicationType, replicationFactor, + new HashMap<>()).close(); + return true; + } catch (IOException ioe) { + LOG.error("create key failed for key:{}", keyName, ioe); + return false; + } + } + + /** + * Helper method to delete an object specified by key name in bucket. 
+ * + * @param keyName key name to be deleted + * @return true if the key is deleted, false otherwise + */ + @Override + public boolean deleteObject(String keyName) { + LOG.trace("issuing delete for key" + keyName); + try { + bucket.deleteKey(keyName); + return true; + } catch (IOException ioe) { + LOG.error("delete key failed " + ioe.getMessage()); + return false; + } + } + + @Override + public long getCreationTime() { + return bucket.getCreationTime(); + } + + @Override + public boolean hasNextKey(String key) { + return bucket.listKeys(key).hasNext(); + } + + @Override + public Iterator listKeys(String pathKey) { + return new IteratorAdapter(bucket.listKeys(pathKey)); + } + + /** + * Adapter to conver OzoneKey to a safe and simple Key implementation. + */ + public static class IteratorAdapter implements Iterator { + + private Iterator original; + + public IteratorAdapter(Iterator listKeys) { + this.original = listKeys; + } + + @Override + public boolean hasNext() { + return original.hasNext(); + } + + @Override + public BasicKeyInfo next() { + OzoneKey next = original.next(); + if (next == null) { + return null; + } else { + return new BasicKeyInfo( + next.getName(), + next.getModificationTime(), + next.getDataSize() + ); + } + } + } +} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java index 5df3cffa3d..909b2aff9b 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java @@ -18,13 +18,13 @@ package org.apache.hadoop.fs.ozone; +import java.io.IOException; +import java.io.InputStream; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FSInputStream; -import 
org.apache.hadoop.ozone.client.io.KeyInputStream; - -import java.io.IOException; -import java.io.InputStream; +import org.apache.hadoop.fs.Seekable; /** * The input stream for Ozone file system. @@ -36,10 +36,10 @@ @InterfaceStability.Evolving public final class OzoneFSInputStream extends FSInputStream { - private final KeyInputStream inputStream; + private final InputStream inputStream; public OzoneFSInputStream(InputStream inputStream) { - this.inputStream = (KeyInputStream)inputStream; + this.inputStream = inputStream; } @Override @@ -59,12 +59,12 @@ public synchronized void close() throws IOException { @Override public void seek(long pos) throws IOException { - inputStream.seek(pos); + ((Seekable) inputStream).seek(pos); } @Override public long getPos() throws IOException { - return inputStream.getPos(); + return ((Seekable) inputStream).getPos(); } @Override diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java index 3670cffa94..efbf93beb5 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.io.OutputStream; -import org.apache.hadoop.ozone.client.io.KeyOutputStream; /** @@ -31,10 +30,10 @@ */ public class OzoneFSOutputStream extends OutputStream { - private final KeyOutputStream outputStream; + private final OutputStream outputStream; public OzoneFSOutputStream(OutputStream outputStream) { - this.outputStream = (KeyOutputStream)outputStream; + this.outputStream = outputStream; } @Override diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java index eb9d100beb..abec436609 100644 --- 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java @@ -25,20 +25,18 @@ import java.util.ArrayList; import java.util.EnumSet; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Iterator; import java.util.regex.Matcher; import java.util.regex.Pattern; - -import com.google.common.base.Preconditions; import java.util.stream.Collectors; import java.util.stream.Stream; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -47,34 +45,25 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.http.client.utils.URIBuilder; -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.permission.FsPermission; +import 
org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Progressable; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import com.google.common.base.Preconditions; +import org.apache.commons.lang3.StringUtils; +import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE; import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME; import static org.apache.hadoop.fs.ozone.Constants.OZONE_USER_DIR; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; -import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME; +import org.apache.http.client.utils.URIBuilder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * The Ozone Filesystem implementation. - * + *

* This subclass is marked as private as code should not be creating it * directly; use {@link FileSystem#get(Configuration)} and variants to create * one. If cast to {@link OzoneFileSystem}, extra methods and features may be @@ -85,16 +74,15 @@ public class OzoneFileSystem extends FileSystem { static final Logger LOG = LoggerFactory.getLogger(OzoneFileSystem.class); - /** The Ozone client for connecting to Ozone server. */ - private OzoneClient ozoneClient; - private ObjectStore objectStore; - private OzoneVolume volume; - private OzoneBucket bucket; + /** + * The Ozone client for connecting to Ozone server. + */ + private URI uri; private String userName; private Path workingDir; - private ReplicationType replicationType; - private ReplicationFactor replicationFactor; + + private OzoneClientAdapter adapter; private static final Pattern URL_SCHEMA_PATTERN = Pattern.compile("(.+)\\.([^\\.]+)"); @@ -102,11 +90,7 @@ public class OzoneFileSystem extends FileSystem { @Override public void initialize(URI name, Configuration conf) throws IOException { super.initialize(name, conf); - if(!(conf instanceof OzoneConfiguration)) { - setConf(new OzoneConfiguration(conf)); - } else { - setConf(conf); - } + setConf(conf); Objects.requireNonNull(name.getScheme(), "No scheme provided in " + name); assert getScheme().equals(name.getScheme()); @@ -125,16 +109,32 @@ public void initialize(URI name, Configuration conf) throws IOException { uri = new URIBuilder().setScheme(OZONE_URI_SCHEME) .setHost(authority).build(); LOG.trace("Ozone URI for ozfs initialization is " + uri); - this.ozoneClient = OzoneClientFactory.getRpcClient(getConf()); - objectStore = ozoneClient.getObjectStore(); - this.volume = objectStore.getVolume(volumeStr); - this.bucket = volume.getBucket(bucketStr); - this.replicationType = ReplicationType.valueOf( - getConf().get(OzoneConfigKeys.OZONE_REPLICATION_TYPE, - OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT)); - this.replicationFactor = ReplicationFactor.valueOf( 
- getConf().getInt(OzoneConfigKeys.OZONE_REPLICATION, - OzoneConfigKeys.OZONE_REPLICATION_DEFAULT)); + + //isolated is the default for ozonefs-lib-legacy which includes the + // /ozonefs.txt, otherwise the default is false. It could be overridden. + boolean defaultValue = + OzoneFileSystem.class.getClassLoader().getResource("ozonefs.txt") + != null; + + //Use string here instead of the constant as constant may not be available + //on the classpath of a hadoop 2.7 + boolean isolatedClassloader = + conf.getBoolean("ozone.fs.isolated-classloader", defaultValue); + + if (isolatedClassloader) { + this.adapter = + OzoneClientAdapterFactory.createAdapter(volumeStr, bucketStr); + } else { + OzoneConfiguration ozoneConfiguration; + if (conf instanceof OzoneConfiguration) { + ozoneConfiguration = (OzoneConfiguration) conf; + } else { + ozoneConfiguration = new OzoneConfiguration(conf); + } + this.adapter = new OzoneClientAdapterImpl(ozoneConfiguration, + volumeStr, bucketStr); + } + try { this.userName = UserGroupInformation.getCurrentUser().getShortUserName(); @@ -142,7 +142,7 @@ public void initialize(URI name, Configuration conf) throws IOException { this.userName = OZONE_DEFAULT_USER; } this.workingDir = new Path(OZONE_USER_DIR, this.userName) - .makeQualified(this.uri, this.workingDir); + .makeQualified(this.uri, this.workingDir); } catch (URISyntaxException ue) { final String msg = "Invalid Ozone endpoint " + name; LOG.error(msg, ue); @@ -153,7 +153,7 @@ public void initialize(URI name, Configuration conf) throws IOException { @Override public void close() throws IOException { try { - ozoneClient.close(); + adapter.close(); } finally { super.close(); } @@ -179,7 +179,7 @@ public FSDataInputStream open(Path f, int bufferSize) throws IOException { } return new FSDataInputStream( - new OzoneFSInputStream(bucket.readKey(key).getInputStream())); + new OzoneFSInputStream(adapter.createInputStream(key))); } @Override @@ -200,19 +200,16 @@ public FSDataOutputStream create(Path 
f, FsPermission permission, throw new FileAlreadyExistsException(f + " already exists"); } LOG.trace("Overwriting file {}", f); - deleteObject(key); + adapter.deleteObject(key); } } catch (FileNotFoundException ignored) { // this means the file is not found } - OzoneOutputStream ozoneOutputStream = - bucket.createKey(key, 0, replicationType, replicationFactor, - new HashMap<>()); // We pass null to FSDataOutputStream so it won't count writes that // are being buffered to a file return new FSDataOutputStream( - new OzoneFSOutputStream(ozoneOutputStream.getOutputStream()), null); + adapter.createKey(key), null); } @Override @@ -236,7 +233,7 @@ public FSDataOutputStream createNonRecursive(Path path, @Override public FSDataOutputStream append(Path f, int bufferSize, - Progressable progress) throws IOException { + Progressable progress) throws IOException { throw new UnsupportedOperationException("append() Not implemented by the " + getClass().getSimpleName() + " FileSystem implementation"); } @@ -256,7 +253,7 @@ private class RenameIterator extends OzoneListingIterator { @Override boolean processKey(String key) throws IOException { String newKeyName = dstKey.concat(key.substring(srcKey.length())); - bucket.renameKey(key, newKeyName); + adapter.renameKey(key, newKeyName); return true; } } @@ -264,7 +261,7 @@ boolean processKey(String key) throws IOException { /** * Check whether the source and destination path are valid and then perform * rename from source path to destination path. - * + *

* The rename operation is performed by renaming the keys with src as prefix. * For such keys the prefix is changed from src to dst. * @@ -361,6 +358,7 @@ public boolean rename(Path src, Path dst) throws IOException { private class DeleteIterator extends OzoneListingIterator { private boolean recursive; + DeleteIterator(Path f, boolean recursive) throws IOException { super(f); @@ -379,7 +377,7 @@ boolean processKey(String key) throws IOException { return true; } else { LOG.trace("deleting key:" + key); - boolean succeed = deleteObject(key); + boolean succeed = adapter.deleteObject(key); // if recursive delete is requested ignore the return value of // deleteObject and issue deletes for other keys. return recursive || succeed; @@ -390,6 +388,7 @@ boolean processKey(String key) throws IOException { /** * Deletes the children of the input dir path by iterating though the * DeleteIterator. + * * @param f directory path to be deleted * @return true if successfully deletes all required keys, false otherwise * @throws IOException @@ -431,7 +430,7 @@ public boolean delete(Path f, boolean recursive) throws IOException { result = innerDelete(f, recursive); } else { LOG.debug("delete: Path is a file: {}", f); - result = deleteObject(key); + result = adapter.deleteObject(key); } if (result) { @@ -449,6 +448,7 @@ public boolean delete(Path f, boolean recursive) throws IOException { /** * Create a fake parent directory key if it does not already exist and no * other child of this parent directory exists. + * * @param f path to the fake parent directory * @throws IOException */ @@ -457,12 +457,13 @@ private void createFakeDirectoryIfNecessary(Path f) throws IOException { if (!key.isEmpty() && !o3Exists(f)) { LOG.debug("Creating new fake directory at {}", f); String dirKey = addTrailingSlashIfNeeded(key); - createDirectory(dirKey); + adapter.createDirectory(dirKey); } } /** * Check if a file or directory exists corresponding to given path. + * * @param f path to file/directory. 
* @return true if it exists, false otherwise. * @throws IOException @@ -487,7 +488,7 @@ private class ListStatusIterator extends OzoneListingIterator { new HashMap<>(LISTING_PAGE_SIZE); private Path f; // the input path - ListStatusIterator(Path f) throws IOException { + ListStatusIterator(Path f) throws IOException { super(f); this.f = f; } @@ -495,6 +496,7 @@ private class ListStatusIterator extends OzoneListingIterator { /** * Add the key to the listStatus result if the key corresponds to the * input path or is an immediate child of the input path. + * * @param key key to be processed * @return always returns true * @throws IOException @@ -518,7 +520,7 @@ boolean processKey(String key) throws IOException { if (pathToKey(keyPath.getParent()).equals(pathToKey(f))) { // This key is an immediate child. Can be file or directory if (key.endsWith(OZONE_URI_DELIMITER)) { - // Key is a directory + // Key is a directory addSubDirStatus(keyPath); } else { addFileStatus(keyPath); @@ -537,6 +539,7 @@ boolean processKey(String key) throws IOException { /** * Adds the FileStatus of keyPath to final result of listStatus. + * * @param filePath path to the file * @throws FileNotFoundException */ @@ -547,6 +550,7 @@ void addFileStatus(Path filePath) throws IOException { /** * Adds the FileStatus of the subdir to final result of listStatus, if not * already included. + * * @param dirPath path to the dir * @throws FileNotFoundException */ @@ -560,9 +564,9 @@ void addSubDirStatus(Path dirPath) throws FileNotFoundException { /** * Traverse the parent directory structure of keyPath to determine the * which parent/ grand-parent/.. is the immediate child of the input path f. + * * @param keyPath path whose parent directory structure should be traversed. * @return immediate child path of the input path f. - * @return immediate child path of the input path f. 
*/ Path getImmediateChildPath(Path keyPath) { Path path = keyPath; @@ -610,6 +614,7 @@ public Path getWorkingDirectory() { /** * Get the username of the FS. + * * @return the short name of the user who instantiated the FS */ public String getUsername() { @@ -648,7 +653,7 @@ private boolean mkdir(Path path) throws IOException { LOG.trace("creating directory for fpart:{}", fPart); String key = pathToKey(fPart); String dirKey = addTrailingSlashIfNeeded(key); - if (!createDirectory(dirKey)) { + if (!adapter.createDirectory(dirKey)) { // Directory creation failed here, // rollback and delete newly created directories LOG.trace("Directory creation failed, path:{}", fPart); @@ -682,11 +687,11 @@ public FileStatus getFileStatus(Path f) throws IOException { if (key.length() == 0) { return new FileStatus(0, true, 1, 0, - bucket.getCreationTime(), qualifiedPath); + adapter.getCreationTime(), qualifiedPath); } // Check if the key exists - OzoneKey ozoneKey = getKeyInfo(key); + BasicKeyInfo ozoneKey = adapter.getKeyInfo(key); if (ozoneKey != null) { LOG.debug("Found exact file for path {}: normal file", f); return new FileStatus(ozoneKey.getDataSize(), false, 1, @@ -702,6 +707,7 @@ public FileStatus getFileStatus(Path f) throws IOException { * Get the FileStatus for input directory path. * They key corresponding to input path is appended with a trailing slash * to return only the corresponding directory key in the bucket. 
+ * * @param f directory path * @return FileStatus for the input directory path * @throws FileNotFoundException @@ -712,9 +718,9 @@ public FileStatus innerGetFileStatusForDir(Path f) String key = pathToKey(qualifiedPath); key = addTrailingSlashIfNeeded(key); - OzoneKey ozoneKey = getKeyInfo(key); - if(ozoneKey != null) { - if (isDirectory(ozoneKey)) { + BasicKeyInfo ozoneKey = adapter.getKeyInfo(key); + if (ozoneKey != null) { + if (adapter.isDirectory(ozoneKey)) { // Key is a directory LOG.debug("Found file (with /) for path {}: fake directory", f); } else { @@ -730,7 +736,7 @@ public FileStatus innerGetFileStatusForDir(Path f) // File or directory corresponding to input path does not exist. // Check if there exists a key prefixed with this key. - boolean hasChildren = bucket.listKeys(key).hasNext(); + boolean hasChildren = adapter.hasNextKey(key); if (hasChildren) { return new FileStatus(0, true, 1, 0, 0, 0, FsPermission.getDirDefault(), getUsername(), getUsername(), qualifiedPath); @@ -739,65 +745,6 @@ public FileStatus innerGetFileStatusForDir(Path f) throw new FileNotFoundException(f + ": No such file or directory!"); } - /** - * Helper method to fetch the key metadata info. - * @param key key whose metadata information needs to be fetched - * @return metadata info of the key - */ - private OzoneKey getKeyInfo(String key) { - try { - return bucket.getKey(key); - } catch (IOException e) { - LOG.trace("Key:{} does not exist", key); - return null; - } - } - - /** - * Helper method to check if an Ozone key is representing a directory. - * @param key key to be checked as a directory - * @return true if key is a directory, false otherwise - */ - private boolean isDirectory(OzoneKey key) { - LOG.trace("key name:{} size:{}", key.getName(), - key.getDataSize()); - return key.getName().endsWith(OZONE_URI_DELIMITER) - && (key.getDataSize() == 0); - } - - /** - * Helper method to create an directory specified by key name in bucket. 
-   * @param keyName key name to be created as directory
-   * @return true if the key is created, false otherwise
-   */
-  private boolean createDirectory(String keyName) {
-    try {
-      LOG.trace("creating dir for key:{}", keyName);
-      bucket.createKey(keyName, 0, replicationType, replicationFactor,
-          new HashMap<>()).close();
-      return true;
-    } catch (IOException ioe) {
-      LOG.error("create key failed for key:{}", keyName, ioe);
-      return false;
-    }
-  }
-
-  /**
-   * Helper method to delete an object specified by key name in bucket.
-   * @param keyName key name to be deleted
-   * @return true if the key is deleted, false otherwise
-   */
-  private boolean deleteObject(String keyName) {
-    LOG.trace("issuing delete for key" + keyName);
-    try {
-      bucket.deleteKey(keyName);
-      return true;
-    } catch (IOException ioe) {
-      LOG.error("delete key failed " + ioe.getMessage());
-      return false;
-    }
-  }
-
   /**
    * Turn a path (relative or otherwise) into an Ozone key.
    *
@@ -805,7 +752,7 @@ private boolean deleteObject(String keyName) {
    * @return the key of the object that represents the file.
    */
   public String pathToKey(Path path) {
-    Objects.requireNonNull(path, "Path can not be null!");
+    Objects.requireNonNull(path, "Path can not be null!");
     if (!path.isAbsolute()) {
       path = new Path(workingDir, path);
     }
@@ -839,17 +786,17 @@ public String toString() {
   }

   /**
-   * This class provides an interface to iterate through all the keys in the
-   * bucket prefixed with the input path key and process them.
-   *
-   * Each implementing class should define how the keys should be processed
-   * through the processKey() function.
+   * This class provides an interface to iterate through all the keys in the
+   * bucket prefixed with the input path key and process them.
+   *

+ * Each implementing class should define how the keys should be processed + * through the processKey() function. */ private abstract class OzoneListingIterator { private final Path path; private final FileStatus status; private String pathKey; - private Iterator keyIterator; + private Iterator keyIterator; OzoneListingIterator(Path path) throws IOException { @@ -859,12 +806,13 @@ private abstract class OzoneListingIterator { if (status.isDirectory()) { this.pathKey = addTrailingSlashIfNeeded(pathKey); } - keyIterator = bucket.listKeys(pathKey); + keyIterator = adapter.listKeys(pathKey); } /** * The output of processKey determines if further iteration through the * keys should be done or not. + * * @return true if we should continue iteration of keys, false otherwise. * @throws IOException */ @@ -876,6 +824,7 @@ private abstract class OzoneListingIterator { * If for any key, the processKey() returns false, then the iteration is * stopped and returned with false indicating that all the keys could not * be processed successfully. + * * @return true if all keys are processed successfully, false otherwise. * @throws IOException */ @@ -884,7 +833,7 @@ boolean iterate() throws IOException { if (status.isDirectory()) { LOG.trace("Iterating directory:{}", pathKey); while (keyIterator.hasNext()) { - OzoneKey key = keyIterator.next(); + BasicKeyInfo key = keyIterator.next(); LOG.trace("iterating key:{}", key.getName()); if (!processKey(key.getName())) { return false; diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index 0dc70b6ce3..12250e78cb 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -11,11 +11,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
---> - +--> 4.0.0 org.apache.hadoop @@ -43,8 +39,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> common client ozone-manager - tools ozonefs + ozonefs-lib + ozonefs-lib-legacy + tools integration-test objectstore-service datanode @@ -102,6 +100,16 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-ozone-filesystem ${ozone.version} + + org.apache.hadoop + hadoop-ozone-filesystem-lib + ${ozone.version} + + + org.apache.hadoop + hadoop-ozone-filesystem-lib-legacy + ${ozone.version} + org.apache.hadoop hadoop-ozone-integration-test @@ -207,6 +215,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds-tools + org.apache.hadoop hadoop-common @@ -304,7 +313,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.codehaus.mojo findbugs-maven-plugin - +