HDDS-922. Create isolated classloader to use ozonefs with any older hadoop versions. Contributed by Elek, Marton.
commit a65aca2fef (parent 214112b2d7)
@@ -47,6 +47,8 @@ public OzoneConfiguration() {
 
   public OzoneConfiguration(Configuration conf) {
     super(conf);
+    //load the configuration from the classloader of the original conf.
+    setClassLoader(conf.getClassLoader());
   }
 
   public List<Property> readPropertyFromXml(URL url) throws JAXBException {
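A minimal sketch (not from the patch; the resource directory below is hypothetical) of what the propagated classloader is used for — Hadoop's `Configuration` resolves named resources such as ozone-site.xml through its classloader, so a copy that kept the default loader could miss them:

```java
import java.net.URL;
import java.net.URLClassLoader;

import org.apache.hadoop.conf.Configuration;

public class ConfLoaderDemo {
  public static void main(String[] args) throws Exception {
    // Hypothetical directory that contains ozone-site.xml and is visible
    // only through this loader (an assumption for the demo).
    ClassLoader isolated =
        new URLClassLoader(new URL[]{new URL("file:/tmp/ozone-conf/")});

    Configuration conf = new Configuration();
    conf.setClassLoader(isolated);

    // getResource() goes through the configured classloader; without the
    // setClassLoader(conf.getClassLoader()) call added above, a derived
    // OzoneConfiguration could resolve this lookup with the wrong loader.
    URL site = conf.getResource("ozone-site.xml");
    System.out.println(site);
  }
}
```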
@@ -371,6 +371,13 @@ public final class OzoneConfigKeys {
   public static final boolean OZONE_ACL_ENABLED_DEFAULT =
       false;
 
+  // For technical reasons this constant is unused; the key is hardcoded in
+  // OzoneFileSystem.initialize.
+  public static final String OZONE_FS_ISOLATED_CLASSLOADER =
+      "ozone.fs.isolated-classloader";
+
+
+
   /**
    * There is no need to instantiate this class.
    */
@@ -1806,4 +1806,19 @@
       not be renewed.
     </description>
   </property>
+
+  <property>
+    <name>ozone.fs.isolated-classloader</name>
+    <value></value>
+    <tag>OZONE, OZONEFS</tag>
+    <description>
+      Enable this for older Hadoop versions to separate the classloading of
+      all the Ozone classes. With the value 'true', ozonefs can be used with
+      older Hadoop versions, as the hadoop3/ozone-related classes are loaded
+      by an isolated classloader.
+
+      The default depends on the jar in use: true for the
+      ozone-filesystem-lib-legacy jar and false for ozone-filesystem-lib.jar.
+    </description>
+  </property>
 </configuration>
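The flag can also be forced from client code; a hedged sketch (the o3fs URI and volume/bucket names are illustrative, not from the patch):

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class IsolatedFlagDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Force the isolated classloader even from the non-legacy jar
    // (setting false would disable it even from the legacy jar).
    conf.setBoolean("ozone.fs.isolated-classloader", true);
    // Illustrative volume/bucket names - adjust to your cluster.
    FileSystem fs = FileSystem.get(URI.create("o3fs://bucket.volume"), conf);
    System.out.println(fs.getUri());
  }
}
```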
@@ -56,12 +56,11 @@ This will make this bucket to be the default file system for HDFS dfs commands a
 You also need to add the ozone-filesystem.jar file to the classpath:
 
 {{< highlight bash >}}
-export HADOOP_CLASSPATH=/opt/ozone/share/hadoop/ozonefs/hadoop-ozone-filesystem.jar:$HADOOP_CLASSPATH
+export HADOOP_CLASSPATH=/opt/ozone/share/ozonefs/lib/hadoop-ozone-filesystem-lib-.*.jar:$HADOOP_CLASSPATH
 {{< /highlight >}}
 
 
-
 
 Once the default Filesystem has been setup, users can run commands like ls, put, mkdir, etc.
 For example,
 
@@ -78,3 +77,19 @@ hdfs dfs -mkdir /users
 
 Or put command etc. In other words, all programs like Hive, Spark, and Distcp will work against this file system.
 Please note that any keys created/deleted in the bucket using methods apart from OzoneFileSystem will show up as directories and files in the Ozone File System.
+
+## Legacy mode
+
+There are two ozonefs files which include all the dependencies:
+
+ * share/ozone/lib/hadoop-ozone-filesystem-lib-VERSION.jar
+ * share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-VERSION.jar
+
+The first one contains all the required dependencies to use ozonefs with a
+compatible hadoop version (hadoop 3.2 / 3.1).
+
+The second one contains all the dependencies in an internal, separated
+directory, and a special class loader is used to load all the classes from
+that location.
+
+With this method the hadoop-ozone-filesystem-lib-legacy.jar can be used with
+any older hadoop version (e.g. hadoop 2.7 or spark+hadoop 2.7).
@@ -106,10 +106,6 @@ run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/workers.sh" "sb
 run cp "${ROOT}/hadoop-ozone/common/src/main/bin/start-ozone.sh" "sbin/"
 run cp "${ROOT}/hadoop-ozone/common/src/main/bin/stop-ozone.sh" "sbin/"
 
-#shaded ozonefs
-run mkdir -p "./share/hadoop/ozonefs"
-run cp "${ROOT}/hadoop-ozone/ozonefs/target/hadoop-ozone-filesystem-${HDDS_VERSION}.jar" "./share/hadoop/ozonefs/hadoop-ozone-filesystem-${HDDS_VERSION}.jar"
-
 #shaded datanode service
 run mkdir -p "./share/hadoop/ozoneplugin"
 run cp "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}-plugin.jar" "./share/hadoop/ozoneplugin/hadoop-ozone-datanode-plugin-${HDDS_VERSION}.jar"
--- a/hadoop-ozone/dist/pom.xml
+++ b/hadoop-ozone/dist/pom.xml
@@ -82,6 +82,13 @@
             <classifier>classpath</classifier>
             <destFileName>hadoop-ozone-tools.classpath</destFileName>
           </artifactItem>
+          <artifactItem>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-ozone-filesystem</artifactId>
+            <version>${ozone.version}</version>
+            <classifier>classpath</classifier>
+            <destFileName>hadoop-ozone-filesystem.classpath</destFileName>
+          </artifactItem>
           <artifactItem>
             <groupId>org.apache.hadoop</groupId>
             <artifactId>hadoop-ozone-common</artifactId>
@@ -183,6 +190,14 @@
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-ozone-tools</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-filesystem-lib</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-filesystem-lib-legacy</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-ozone-common</artifactId>
@@ -19,12 +19,12 @@ services:
   datanode:
     image: apache/hadoop-runner
     volumes:
      - ../..:/opt/hadoop
     ports:
      - 9864
     command: ["/opt/hadoop/bin/ozone","datanode"]
     env_file:
      - ./docker-config
   ozoneManager:
     image: apache/hadoop-runner
     hostname: ozoneManager
@@ -36,7 +36,7 @@ services:
      ENSURE_OM_INITIALIZED: /data/metadata/ozoneManager/current/VERSION
      WAITFOR: scm:9876
     env_file:
      - ./docker-config
     command: ["/opt/hadoop/bin/ozone","om"]
   scm:
     image: apache/hadoop-runner
@@ -45,16 +45,25 @@ services:
     ports:
      - 9876
     env_file:
      - ./docker-config
     environment:
      ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
     command: ["/opt/hadoop/bin/ozone","scm"]
-  hadooplast:
+  hadoop3:
     image: flokkr/hadoop:3.1.0
     volumes:
      - ../..:/opt/ozone
     env_file:
      - ./docker-config
     environment:
-     HADOOP_CLASSPATH: /opt/ozone/share/hadoop/ozonefs/*.jar
+     HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-0*.jar
+    command: ["watch","-n","100000","ls"]
+  hadoop2:
+    image: flokkr/hadoop:2.9.0
+    volumes:
+     - ../..:/opt/ozone
+    env_file:
+     - ./docker-config
+    environment:
+     HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy*.jar
     command: ["watch","-n","100000","ls"]
--- /dev/null
+++ b/hadoop-ozone/ozonefs-lib-legacy/pom.xml
@@ -0,0 +1,104 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-ozone</artifactId>
+    <version>0.4.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>hadoop-ozone-filesystem-lib-legacy</artifactId>
+  <name>Apache Hadoop Ozone FileSystem Legacy Jar Library</name>
+  <description>This project creates an uberjar from ozonefs with all the
+    dependencies, but the dependencies are located in an isolated subdir
+    and loaded by a custom class loader. Can be used together with Hadoop 2.x
+  </description>
+  <packaging>jar</packaging>
+  <version>0.4.0-SNAPSHOT</version>
+  <properties>
+    <file.encoding>UTF-8</file.encoding>
+    <downloadSources>true</downloadSources>
+  </properties>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>include-dependencies</id>
+            <goals>
+              <goal>unpack-dependencies</goal>
+            </goals>
+            <phase>prepare-package</phase>
+            <configuration>
+              <outputDirectory>target/classes/libs</outputDirectory>
+              <includeScope>compile</includeScope>
+              <excludes>META-INF/*.SF</excludes>
+              <excludeArtifactIds>
+                slf4j-api,slf4j-log4j12,log4j-api,log4j-core,log4j,hadoop-ozone-filesystem
+              </excludeArtifactIds>
+            </configuration>
+          </execution>
+          <execution>
+            <id>include-ozonefs</id>
+            <goals>
+              <goal>unpack-dependencies</goal>
+            </goals>
+            <phase>prepare-package</phase>
+            <configuration>
+              <outputDirectory>target/classes</outputDirectory>
+              <includeArtifactIds>hadoop-ozone-filesystem</includeArtifactIds>
+              <includeScope>compile</includeScope>
+              <excludes>META-INF/*.SF</excludes>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>animal-sniffer-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>signature-check</id>
+            <phase></phase>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-filesystem</artifactId>
+      <scope>compile</scope>
+    </dependency>
+  </dependencies>
+</project>
@@ -0,0 +1,21 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+Apache Hadoop Ozone placeholder file.
+
+The usage of the legacy version of the uber jar can be detected based on
+the existence of this file.
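This placeholder doubles as a runtime marker: the legacy uber jar ships it, the plain lib jar does not, and OzoneFileSystem.initialize checks for it to pick the default mode. A minimal sketch of the same check:

```java
public class MarkerCheckDemo {
  public static void main(String[] args) {
    // getResource() finds /ozonefs.txt only when the legacy uber jar is on
    // the classpath, so this one lookup selects the default mode at runtime.
    boolean legacy = MarkerCheckDemo.class.getClassLoader()
        .getResource("ozonefs.txt") != null;
    System.out.println(legacy ? "isolated classloader by default"
                              : "plain classpath by default");
  }
}
```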
--- /dev/null
+++ b/hadoop-ozone/ozonefs-lib/pom.xml
@@ -0,0 +1,89 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-ozone</artifactId>
+    <version>0.4.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>hadoop-ozone-filesystem-lib</artifactId>
+  <name>Apache Hadoop Ozone FileSystem Single Jar Library</name>
+  <packaging>jar</packaging>
+  <description>This project creates an uber jar from ozonefs with all the
+    dependencies.
+  </description>
+  <version>0.4.0-SNAPSHOT</version>
+  <properties>
+    <file.encoding>UTF-8</file.encoding>
+    <downloadSources>true</downloadSources>
+  </properties>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>unpack-dependencies</goal>
+            </goals>
+            <phase>prepare-package</phase>
+            <configuration>
+              <outputDirectory>target/classes</outputDirectory>
+              <includeScope>compile</includeScope>
+              <excludes>META-INF/*.SF</excludes>
+              <excludeArtifactIds>
+                slf4j-api,slf4j-log4j12,log4j-api,log4j-core,log4j
+              </excludeArtifactIds>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>animal-sniffer-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>signature-check</id>
+            <phase></phase>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-filesystem</artifactId>
+      <scope>compile</scope>
+    </dependency>
+  </dependencies>
+</project>
@@ -43,46 +43,6 @@
         </execution>
       </executions>
     </plugin>
-    <plugin>
-      <groupId>org.apache.maven.plugins</groupId>
-      <artifactId>maven-shade-plugin</artifactId>
-      <version>3.1.1</version>
-      <configuration>
-        <artifactSet>
-          <includes>
-            <include>com.google.guava:guava</include>
-            <include>org.slf4j:slf4j-api</include>
-            <include>com.google.protobuf:protobuf-java</include>
-            <include>com.nimbusds:nimbus-jose-jwt</include>
-            <include>com.github.stephenc.jcip:jcip-annotations</include>
-            <include>com.google.code.findbugs:jsr305</include>
-            <include>org.apache.hadoop:hadoop-ozone-client</include>
-            <include>org.apache.hadoop:hadoop-hdds-client</include>
-            <include>org.apache.hadoop:hadoop-hdds-common</include>
-            <include>org.fusesource.leveldbjni:leveldbjni-all</include>
-            <include>org.apache.ratis:ratis-server</include>
-            <include>org.apache.ratis:ratis-proto-shaded</include>
-            <include>com.google.auto.value:auto-value-annotations</include>
-            <include>com.squareup:javapoet</include>
-            <include>org.jctools:jctools-core</include>
-            <include>org.apache.ratis:ratis-common</include>
-            <include>org.apache.ratis:ratis-client</include>
-            <include>org.apache.ratis:ratis-netty</include>
-            <include>org.apache.ratis:ratis-grpc</include>
-            <include>org.rocksdb:rocksdbjni</include>
-            <include>org.apache.hadoop:hadoop-ozone-common</include>
-          </includes>
-        </artifactSet>
-      </configuration>
-      <executions>
-        <execution>
-          <phase>package</phase>
-          <goals>
-            <goal>shade</goal>
-          </goals>
-        </execution>
-      </executions>
-    </plugin>
     <plugin>
       <groupId>org.apache.maven.plugins</groupId>
       <artifactId>maven-dependency-plugin</artifactId>
@@ -96,7 +56,7 @@
         <configuration>
           <!-- build a shellprofile -->
           <outputFile>
-            ${project.basedir}/target/hadoop-tools-deps/${project.artifactId}.tools-optional.txt
+            ${project.basedir}/target/1hadoop-tools-deps/${project.artifactId}.tools-optional.txt
          </outputFile>
        </configuration>
      </execution>
@@ -111,6 +71,16 @@
         </includes>
       </configuration>
     </plugin>
+    <plugin>
+      <groupId>org.codehaus.mojo</groupId>
+      <artifactId>animal-sniffer-maven-plugin</artifactId>
+      <executions>
+        <execution>
+          <id>signature-check</id>
+          <phase></phase>
+        </execution>
+      </executions>
+    </plugin>
   </plugins>
 </build>
 
@@ -118,17 +88,17 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
-      <scope>provided</scope>
+      <scope>compile</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs</artifactId>
-      <scope>provided</scope>
+      <scope>compile</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs-client</artifactId>
-      <scope>provided</scope>
+      <scope>compile</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone;
+
+/**
+ * Minimum set of Ozone key information attributes.
+ * <p>
+ * This class doesn't depend on any other ozone class, just on primitive
+ * java types. It can be used safely in the signature of OzoneClientAdapter,
+ * as it won't cause any dependency problem even when a separate class
+ * loader is used.
+ */
+public class BasicKeyInfo {
+
+  private String name;
+
+  private long modificationTime;
+
+  private long dataSize;
+
+  public BasicKeyInfo(String name, long modificationTime, long size) {
+    this.name = name;
+    this.modificationTime = modificationTime;
+    this.dataSize = size;
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public long getModificationTime() {
+    return modificationTime;
+  }
+
+  public long getDataSize() {
+    return dataSize;
+  }
+}
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone;
+
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * Class loader which delegates the loading only for the selected classes.
+ *
+ * <p>
+ * By default a java classloader delegates all the class loading to the
+ * parent first, and loads the class itself only if the parent didn't find
+ * it.
+ * <p>
+ * This simple class loader does the opposite. Everything is loaded with this
+ * class loader without delegation _except_ the few classes which are defined
+ * in the constructor.
+ * <p>
+ * With this method we can use two separate class loaders (the original main
+ * classloader and an instance of this one which loads the isolated classes),
+ * while the few selected classes are shared between the two class loaders.
+ * <p>
+ * With this approach it's possible to use any older hadoop version
+ * (main classloader) together with ozonefs (instance of this classloader) as
+ * only the selected classes are shared between the class loaders.
+ */
+public class FilteredClassLoader extends URLClassLoader {
+
+  private final ClassLoader systemClassLoader;
+
+  private final ClassLoader delegate;
+  private Set<String> delegatedClasses = new HashSet<>();
+
+  public FilteredClassLoader(URL[] urls, ClassLoader parent) {
+    super(urls, null);
+    delegatedClasses.add("org.apache.hadoop.fs.ozone.OzoneClientAdapter");
+    delegatedClasses.add("org.apache.hadoop.fs.ozone.BasicKeyInfo");
+    delegatedClasses.add("org.apache.hadoop.fs.ozone.OzoneFSOutputStream");
+    delegatedClasses.add("org.apache.hadoop.fs.Seekable");
+    this.delegate = parent;
+    systemClassLoader = getSystemClassLoader();
+  }
+
+  @Override
+  public Class<?> loadClass(String name) throws ClassNotFoundException {
+    if (delegatedClasses.contains(name) ||
+        name.startsWith("org.apache.log4j") ||
+        name.startsWith("org.slf4j")) {
+      return delegate.loadClass(name);
+    }
+    return super.loadClass(name);
+  }
+
+  private Class<?> loadFromSystem(String name) {
+    if (systemClassLoader != null) {
+      try {
+        return systemClassLoader.loadClass(name);
+      } catch (ClassNotFoundException ex) {
+        //no problem
+        return null;
+      }
+    } else {
+      return null;
+    }
+  }
+}
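A hedged usage sketch (the jar path is illustrative): everything resolves inside the filtered loader except the handful of delegated names, which come from the parent and therefore stay cast-compatible with the host application.

```java
import java.net.URL;

import org.apache.hadoop.fs.ozone.FilteredClassLoader;

public class FilteredLoaderDemo {
  public static void main(String[] args) throws Exception {
    ClassLoader app = FilteredLoaderDemo.class.getClassLoader();
    // Illustrative location of the legacy uber jar - adjust as needed.
    URL[] urls =
        {new URL("file:/opt/ozone/hadoop-ozone-filesystem-lib-legacy.jar")};
    ClassLoader isolated = new FilteredClassLoader(urls, app);

    // Loaded inside the isolated world: a different Class object than the
    // application would get for the same name.
    Class<?> impl = isolated
        .loadClass("org.apache.hadoop.fs.ozone.OzoneClientAdapterImpl");

    // Delegated to the parent: the identical Class object on both sides,
    // so instances can cross the classloader boundary.
    Class<?> shared = isolated
        .loadClass("org.apache.hadoop.fs.ozone.OzoneClientAdapter");
    System.out.println(shared == app.loadClass(shared.getName())); // true
    System.out.println(impl.getClassLoader() == isolated);         // true
  }
}
```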
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Iterator;
+
+/**
+ * Lightweight adapter to separate hadoop/ozone classes.
+ * <p>
+ * This interface contains only the bare minimum of Ozone classes in its
+ * signatures. It can be loaded by a different classloader because only the
+ * objects in the method signatures need to be shared between the
+ * classloaders.
+ */
+public interface OzoneClientAdapter {
+
+  void close() throws IOException;
+
+  InputStream createInputStream(String key) throws IOException;
+
+  OzoneFSOutputStream createKey(String key) throws IOException;
+
+  void renameKey(String key, String newKeyName) throws IOException;
+
+  BasicKeyInfo getKeyInfo(String keyName);
+
+  boolean isDirectory(BasicKeyInfo key);
+
+  boolean createDirectory(String keyName);
+
+  boolean deleteObject(String keyName);
+
+  long getCreationTime();
+
+  boolean hasNextKey(String key);
+
+  Iterator<BasicKeyInfo> listKeys(String pathKey);
+
+}
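Because every parameter and return type here is either a JDK type or one of the delegated classes, code compiled against the host classloader can drive an implementation living entirely inside the isolated one. A hedged sketch of a round trip (the key name is illustrative):

```java
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import org.apache.hadoop.fs.ozone.BasicKeyInfo;
import org.apache.hadoop.fs.ozone.OzoneClientAdapter;

public final class AdapterRoundTrip {
  private AdapterRoundTrip() {
  }

  static void copyAndInspect(OzoneClientAdapter adapter) throws IOException {
    // Write through the adapter; OzoneFSOutputStream is a delegated type.
    try (OutputStream out = adapter.createKey("demo/key")) {
      out.write("hello".getBytes());
    }
    // Read it back through a plain JDK InputStream.
    try (InputStream in = adapter.createInputStream("demo/key")) {
      System.out.println(in.read());
    }
    // Inspect the shared, primitives-only metadata type.
    BasicKeyInfo info = adapter.getKeyInfo("demo/key");
    if (info != null) {
      System.out.println(info.getName() + " " + info.getDataSize());
    }
  }
}
```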
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Enumeration;
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Creates OzoneClientAdapter with classloader separation.
+ */
+public final class OzoneClientAdapterFactory {
+
+  static final Logger LOG =
+      LoggerFactory.getLogger(OzoneClientAdapterFactory.class);
+
+  private OzoneClientAdapterFactory() {
+  }
+
+  public static OzoneClientAdapter createAdapter(
+      String volumeStr,
+      String bucketStr)
+      throws IOException {
+
+    ClassLoader currentClassLoader = OzoneFileSystem.class.getClassLoader();
+    List<URL> urls = new ArrayList<>();
+
+    findEmbeddedLibsUrl(urls, currentClassLoader);
+
+    findConfigDirUrl(urls, currentClassLoader);
+
+    ClassLoader classLoader =
+        new FilteredClassLoader(urls.toArray(new URL[0]), currentClassLoader);
+
+    try {
+
+      ClassLoader contextClassLoader =
+          Thread.currentThread().getContextClassLoader();
+      Thread.currentThread().setContextClassLoader(classLoader);
+
+      //this class caches the context classloader during the first load;
+      //call it here while the context class loader is set to the isolated
+      //loader to make sure the grpc class will be loaded by the right
+      //loader
+      Class<?> reflectionUtils =
+          classLoader.loadClass("org.apache.ratis.util.ReflectionUtils");
+      reflectionUtils.getMethod("getClassByName", String.class)
+          .invoke(null, "org.apache.ratis.grpc.GrpcFactory");
+
+      OzoneClientAdapter ozoneClientAdapter = (OzoneClientAdapter) classLoader
+          .loadClass("org.apache.hadoop.fs.ozone.OzoneClientAdapterImpl")
+          .getConstructor(String.class, String.class)
+          .newInstance(
+              volumeStr,
+              bucketStr);
+
+      Thread.currentThread().setContextClassLoader(contextClassLoader);
+
+      return ozoneClientAdapter;
+    } catch (Exception e) {
+      LOG.error("Can't initialize the ozoneClientAdapter", e);
+      throw new IOException(
+          "Can't initialize the OzoneClientAdapter implementation", e);
+    }
+
+  }
+
+  private static void findConfigDirUrl(List<URL> urls,
+      ClassLoader currentClassLoader) throws IOException {
+    Enumeration<URL> conf =
+        currentClassLoader.getResources("ozone-site.xml");
+    while (conf.hasMoreElements()) {
+      urls.add(
+          new URL(
+              conf.nextElement().toString().replace("ozone-site.xml", "")));
+    }
+  }
+
+  private static void findEmbeddedLibsUrl(List<URL> urls,
+      ClassLoader currentClassloader)
+      throws MalformedURLException {
+
+    //marker file is added to the jar to make it easier to find the URL
+    // for the current jar.
+    String markerFile = "ozonefs.txt";
+    ClassLoader currentClassLoader = OzoneFileSystem.class.getClassLoader();
+
+    URL ozFs = currentClassLoader
+        .getResource(markerFile);
+    String rootPath = ozFs.toString().replace(markerFile, "");
+    urls.add(new URL(rootPath));
+
+    urls.add(new URL(rootPath + "libs/"));
+  }
+
+}
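The factory's save/switch/restore dance around the thread context classloader is a general pattern worth isolating; a minimal sketch (with the restore in a finally block, slightly stricter than the factory above, which restores only on the success path):

```java
public final class TcclScope {
  private TcclScope() {
  }

  /** Runs an action with the given context classloader, then restores it. */
  static void runWith(ClassLoader loader, Runnable action) {
    Thread current = Thread.currentThread();
    ClassLoader saved = current.getContextClassLoader();
    current.setContextClassLoader(loader);
    try {
      // Libraries that cache the TCCL on first touch (the factory forces
      // ratis' GrpcFactory through exactly this window) now see 'loader'.
      action.run();
    } finally {
      current.setContextClassLoader(saved);
    }
  }
}
```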
@@ -0,0 +1,235 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.HashMap;
+import java.util.Iterator;
+
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.OzoneKey;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Implementation of the OzoneFileSystem calls.
+ */
+public class OzoneClientAdapterImpl implements OzoneClientAdapter {
+
+  static final Logger LOG =
+      LoggerFactory.getLogger(OzoneClientAdapterImpl.class);
+
+  private OzoneClient ozoneClient;
+  private ObjectStore objectStore;
+  private OzoneVolume volume;
+  private OzoneBucket bucket;
+  private ReplicationType replicationType;
+  private ReplicationFactor replicationFactor;
+
+  public OzoneClientAdapterImpl(String volumeStr, String bucketStr)
+      throws IOException {
+    this(createConf(), volumeStr, bucketStr);
+  }
+
+  private static OzoneConfiguration createConf() {
+    ClassLoader contextClassLoader =
+        Thread.currentThread().getContextClassLoader();
+    Thread.currentThread().setContextClassLoader(null);
+    OzoneConfiguration conf = new OzoneConfiguration();
+    Thread.currentThread().setContextClassLoader(contextClassLoader);
+    return conf;
+  }
+
+  public OzoneClientAdapterImpl(OzoneConfiguration conf, String volumeStr,
+      String bucketStr) throws IOException {
+    ClassLoader contextClassLoader =
+        Thread.currentThread().getContextClassLoader();
+    Thread.currentThread().setContextClassLoader(null);
+    try {
+      String replicationTypeConf =
+          conf.get(OzoneConfigKeys.OZONE_REPLICATION_TYPE,
+              OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT);
+
+      int replicationCountConf = conf.getInt(OzoneConfigKeys.OZONE_REPLICATION,
+          OzoneConfigKeys.OZONE_REPLICATION_DEFAULT);
+      this.ozoneClient =
+          OzoneClientFactory.getRpcClient(conf);
+      objectStore = ozoneClient.getObjectStore();
+      this.volume = objectStore.getVolume(volumeStr);
+      this.bucket = volume.getBucket(bucketStr);
+      this.replicationType = ReplicationType.valueOf(replicationTypeConf);
+      this.replicationFactor = ReplicationFactor.valueOf(replicationCountConf);
+    } finally {
+      Thread.currentThread().setContextClassLoader(contextClassLoader);
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    ozoneClient.close();
+  }
+
+  @Override
+  public InputStream createInputStream(String key) throws IOException {
+    return bucket.readKey(key).getInputStream();
+  }
+
+  @Override
+  public OzoneFSOutputStream createKey(String key) throws IOException {
+    OzoneOutputStream ozoneOutputStream =
+        bucket.createKey(key, 0, replicationType, replicationFactor,
+            new HashMap<>());
+    return new OzoneFSOutputStream(ozoneOutputStream.getOutputStream());
+  }
+
+  @Override
+  public void renameKey(String key, String newKeyName) throws IOException {
+    bucket.renameKey(key, newKeyName);
+  }
+
+  /**
+   * Helper method to fetch the key metadata info.
+   *
+   * @param keyName key whose metadata information needs to be fetched
+   * @return metadata info of the key
+   */
+  @Override
+  public BasicKeyInfo getKeyInfo(String keyName) {
+    try {
+      OzoneKey key = bucket.getKey(keyName);
+      return new BasicKeyInfo(
+          keyName,
+          key.getModificationTime(),
+          key.getDataSize()
+      );
+    } catch (IOException e) {
+      LOG.trace("Key:{} does not exist", keyName);
+      return null;
+    }
+  }
+
+  /**
+   * Helper method to check if an Ozone key is representing a directory.
+   *
+   * @param key key to be checked as a directory
+   * @return true if key is a directory, false otherwise
+   */
+  @Override
+  public boolean isDirectory(BasicKeyInfo key) {
+    LOG.trace("key name:{} size:{}", key.getName(),
+        key.getDataSize());
+    return key.getName().endsWith(OZONE_URI_DELIMITER)
+        && (key.getDataSize() == 0);
+  }
+
+  /**
+   * Helper method to create a directory specified by key name in bucket.
+   *
+   * @param keyName key name to be created as directory
+   * @return true if the key is created, false otherwise
+   */
+  @Override
+  public boolean createDirectory(String keyName) {
+    try {
+      LOG.trace("creating dir for key:{}", keyName);
+      bucket.createKey(keyName, 0, replicationType, replicationFactor,
+          new HashMap<>()).close();
+      return true;
+    } catch (IOException ioe) {
+      LOG.error("create key failed for key:{}", keyName, ioe);
+      return false;
+    }
+  }
+
+  /**
+   * Helper method to delete an object specified by key name in bucket.
+   *
+   * @param keyName key name to be deleted
+   * @return true if the key is deleted, false otherwise
+   */
+  @Override
+  public boolean deleteObject(String keyName) {
+    LOG.trace("issuing delete for key" + keyName);
+    try {
+      bucket.deleteKey(keyName);
+      return true;
+    } catch (IOException ioe) {
+      LOG.error("delete key failed " + ioe.getMessage());
+      return false;
+    }
+  }
+
+  @Override
+  public long getCreationTime() {
+    return bucket.getCreationTime();
+  }
+
+  @Override
+  public boolean hasNextKey(String key) {
+    return bucket.listKeys(key).hasNext();
+  }
+
+  @Override
+  public Iterator<BasicKeyInfo> listKeys(String pathKey) {
+    return new IteratorAdapter(bucket.listKeys(pathKey));
+  }
+
+  /**
+   * Adapter to convert OzoneKey to a safe and simple Key implementation.
+   */
+  public static class IteratorAdapter implements Iterator<BasicKeyInfo> {
+
+    private Iterator<? extends OzoneKey> original;
+
+    public IteratorAdapter(Iterator<? extends OzoneKey> listKeys) {
+      this.original = listKeys;
+    }
+
+    @Override
+    public boolean hasNext() {
+      return original.hasNext();
+    }
+
+    @Override
+    public BasicKeyInfo next() {
+      OzoneKey next = original.next();
+      if (next == null) {
+        return null;
+      } else {
+        return new BasicKeyInfo(
+            next.getName(),
+            next.getModificationTime(),
+            next.getDataSize()
+        );
+      }
+    }
+  }
+}
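The createConf() trick above deserves a note: with a null context classloader, Hadoop's Configuration falls back to the loader that defined the Configuration class itself — here the isolated one — so ozone-default.xml and ozone-site.xml are read from the right jar. A minimal generic sketch of the same pattern:

```java
public final class NullTcclDemo {
  private NullTcclDemo() {
  }

  /**
   * Runs a factory with the context classloader cleared, mirroring
   * createConf(): resource lookups then fall back to the defining
   * classloader of the classes involved instead of the caller's TCCL.
   */
  static <T> T withNoTccl(java.util.function.Supplier<T> factory) {
    Thread t = Thread.currentThread();
    ClassLoader saved = t.getContextClassLoader();
    t.setContextClassLoader(null);
    try {
      return factory.get();
    } finally {
      t.setContextClassLoader(saved);
    }
  }
}
```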
@@ -18,13 +18,13 @@
  */
 package org.apache.hadoop.fs.ozone;
 
+import java.io.IOException;
+import java.io.InputStream;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FSInputStream;
-import org.apache.hadoop.ozone.client.io.KeyInputStream;
-
-import java.io.IOException;
-import java.io.InputStream;
+import org.apache.hadoop.fs.Seekable;
 
 /**
  * The input stream for Ozone file system.
@@ -36,10 +36,10 @@
 @InterfaceStability.Evolving
 public final class OzoneFSInputStream extends FSInputStream {
 
-  private final KeyInputStream inputStream;
+  private final InputStream inputStream;
 
   public OzoneFSInputStream(InputStream inputStream) {
-    this.inputStream = (KeyInputStream)inputStream;
+    this.inputStream = inputStream;
   }
 
   @Override
@@ -59,12 +59,12 @@ public synchronized void close() throws IOException {
 
   @Override
   public void seek(long pos) throws IOException {
-    inputStream.seek(pos);
+    ((Seekable) inputStream).seek(pos);
   }
 
   @Override
   public long getPos() throws IOException {
-    return inputStream.getPos();
+    return ((Seekable) inputStream).getPos();
   }
 
   @Override
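The cast works because the concrete stream handed over by the adapter still implements org.apache.hadoop.fs.Seekable — one of the few interfaces the FilteredClassLoader delegates, so the check succeeds across the classloader boundary. A guarded variant for clarity (a sketch, not from the patch):

```java
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.fs.Seekable;

final class SeekHelper {
  private SeekHelper() {
  }

  static void seekSafely(InputStream in, long pos) throws IOException {
    // Seekable is one of the delegated classes, so this check succeeds even
    // when 'in' was created inside the isolated classloader.
    if (in instanceof Seekable) {
      ((Seekable) in).seek(pos);
    } else {
      throw new IOException("Stream is not seekable: " + in.getClass());
    }
  }
}
```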
@@ -20,7 +20,6 @@
 
 import java.io.IOException;
 import java.io.OutputStream;
-import org.apache.hadoop.ozone.client.io.KeyOutputStream;
 
 
 /**
@@ -31,10 +30,10 @@
  */
 public class OzoneFSOutputStream extends OutputStream {
 
-  private final KeyOutputStream outputStream;
+  private final OutputStream outputStream;
 
   public OzoneFSOutputStream(OutputStream outputStream) {
-    this.outputStream = (KeyOutputStream)outputStream;
+    this.outputStream = outputStream;
   }
 
   @Override
@@ -25,20 +25,18 @@
 import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
-import java.util.Iterator;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import com.google.common.base.Preconditions;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -47,34 +45,25 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.client.OzoneKey;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.http.client.utils.URIBuilder;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 
+import com.google.common.base.Preconditions;
+import org.apache.commons.lang3.StringUtils;
+import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE;
 import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
 import static org.apache.hadoop.fs.ozone.Constants.OZONE_USER_DIR;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
-import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
+import org.apache.http.client.utils.URIBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The Ozone Filesystem implementation.
- *
+ * <p>
  * This subclass is marked as private as code should not be creating it
  * directly; use {@link FileSystem#get(Configuration)} and variants to create
 * one. If cast to {@link OzoneFileSystem}, extra methods and features may be
@@ -85,16 +74,15 @@
 public class OzoneFileSystem extends FileSystem {
   static final Logger LOG = LoggerFactory.getLogger(OzoneFileSystem.class);
 
-  /** The Ozone client for connecting to Ozone server. */
-  private OzoneClient ozoneClient;
-  private ObjectStore objectStore;
-  private OzoneVolume volume;
-  private OzoneBucket bucket;
+  /**
+   * The Ozone client for connecting to Ozone server.
+   */
+
   private URI uri;
   private String userName;
   private Path workingDir;
-  private ReplicationType replicationType;
-  private ReplicationFactor replicationFactor;
+  private OzoneClientAdapter adapter;
 
   private static final Pattern URL_SCHEMA_PATTERN =
       Pattern.compile("(.+)\\.([^\\.]+)");
@@ -102,11 +90,7 @@ public class OzoneFileSystem extends FileSystem {
   @Override
   public void initialize(URI name, Configuration conf) throws IOException {
     super.initialize(name, conf);
-    if(!(conf instanceof OzoneConfiguration)) {
-      setConf(new OzoneConfiguration(conf));
-    } else {
-      setConf(conf);
-    }
+    setConf(conf);
     Objects.requireNonNull(name.getScheme(), "No scheme provided in " + name);
     assert getScheme().equals(name.getScheme());
 
@ -125,16 +109,32 @@ public void initialize(URI name, Configuration conf) throws IOException {
|
|||||||
uri = new URIBuilder().setScheme(OZONE_URI_SCHEME)
|
uri = new URIBuilder().setScheme(OZONE_URI_SCHEME)
|
||||||
.setHost(authority).build();
|
.setHost(authority).build();
|
||||||
LOG.trace("Ozone URI for ozfs initialization is " + uri);
|
LOG.trace("Ozone URI for ozfs initialization is " + uri);
|
||||||
this.ozoneClient = OzoneClientFactory.getRpcClient(getConf());
|
|
||||||
objectStore = ozoneClient.getObjectStore();
|
//isolated is the default for ozonefs-lib-legacy which includes the
|
||||||
this.volume = objectStore.getVolume(volumeStr);
|
// /ozonefs.txt, otherwise the default is false. It could be overridden.
|
||||||
this.bucket = volume.getBucket(bucketStr);
|
boolean defaultValue =
|
||||||
this.replicationType = ReplicationType.valueOf(
|
OzoneFileSystem.class.getClassLoader().getResource("ozonefs.txt")
|
||||||
getConf().get(OzoneConfigKeys.OZONE_REPLICATION_TYPE,
|
!= null;
|
||||||
OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT));
|
|
||||||
this.replicationFactor = ReplicationFactor.valueOf(
|
//Use string here instead of the constant as constant may not be available
|
||||||
getConf().getInt(OzoneConfigKeys.OZONE_REPLICATION,
|
//on the classpath of a hadoop 2.7
|
||||||
OzoneConfigKeys.OZONE_REPLICATION_DEFAULT));
|
boolean isolatedClassloader =
|
||||||
|
conf.getBoolean("ozone.fs.isolated-classloader", defaultValue);
|
||||||
|
|
||||||
|
if (isolatedClassloader) {
|
||||||
|
this.adapter =
|
||||||
|
OzoneClientAdapterFactory.createAdapter(volumeStr, bucketStr);
|
||||||
|
} else {
|
||||||
|
OzoneConfiguration ozoneConfiguration;
|
||||||
|
if (conf instanceof OzoneConfiguration) {
|
||||||
|
ozoneConfiguration = (OzoneConfiguration) conf;
|
||||||
|
} else {
|
||||||
|
ozoneConfiguration = new OzoneConfiguration(conf);
|
||||||
|
}
|
||||||
|
this.adapter = new OzoneClientAdapterImpl(ozoneConfiguration,
|
||||||
|
volumeStr, bucketStr);
|
||||||
|
}
|
||||||
|
|
||||||
try {
|
try {
|
||||||
this.userName =
|
this.userName =
|
||||||
UserGroupInformation.getCurrentUser().getShortUserName();
|
UserGroupInformation.getCurrentUser().getShortUserName();
|
||||||
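The initialize() hunk above picks the classloading mode by probing for a marker resource: the ozonefs-lib-legacy jar bundles an `ozonefs.txt` file, so when that jar is on the classpath the default flips to isolated, and the `ozone.fs.isolated-classloader` key can still override the default either way. Below is a minimal, self-contained sketch of the same marker-resource technique; the class name and the use of a system property instead of a Hadoop `Configuration` are illustrative assumptions, not part of the patch:

```java
import java.net.URL;

// Sketch: derive a feature default from a marker resource on the classpath,
// then let an explicit setting override it (mirrors the detection above).
public final class MarkerResourceDefaultSketch {

  // True when a jar bundling ozonefs.txt (e.g. the legacy jar) is on the
  // classpath; getResource returns null when the marker file is absent.
  static boolean isolatedByDefault() {
    URL marker = MarkerResourceDefaultSketch.class.getClassLoader()
        .getResource("ozonefs.txt");
    return marker != null;
  }

  public static void main(String[] args) {
    boolean defaultValue = isolatedByDefault();
    // A system property stands in here for
    // conf.getBoolean("ozone.fs.isolated-classloader", defaultValue).
    boolean isolated = Boolean.parseBoolean(System.getProperty(
        "ozone.fs.isolated-classloader", String.valueOf(defaultValue)));
    System.out.println("isolated classloader: " + isolated);
  }
}
```

The same trick keeps the decision free of compile-time dependencies, which matters here because the config-key constant class may not be loadable on a Hadoop 2.7 classpath.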
@@ -142,7 +142,7 @@ public void initialize(URI name, Configuration conf) throws IOException {
       this.userName = OZONE_DEFAULT_USER;
     }
     this.workingDir = new Path(OZONE_USER_DIR, this.userName)
         .makeQualified(this.uri, this.workingDir);
   } catch (URISyntaxException ue) {
     final String msg = "Invalid Ozone endpoint " + name;
     LOG.error(msg, ue);
@@ -153,7 +153,7 @@ public void initialize(URI name, Configuration conf) throws IOException {
   @Override
   public void close() throws IOException {
     try {
-      ozoneClient.close();
+      adapter.close();
     } finally {
       super.close();
     }
@@ -179,7 +179,7 @@ public FSDataInputStream open(Path f, int bufferSize) throws IOException {
     }
 
     return new FSDataInputStream(
-        new OzoneFSInputStream(bucket.readKey(key).getInputStream()));
+        new OzoneFSInputStream(adapter.createInputStream(key)));
   }
 
   @Override
@@ -200,19 +200,16 @@ public FSDataOutputStream create(Path f, FsPermission permission,
           throw new FileAlreadyExistsException(f + " already exists");
         }
         LOG.trace("Overwriting file {}", f);
-        deleteObject(key);
+        adapter.deleteObject(key);
       }
     } catch (FileNotFoundException ignored) {
       // this means the file is not found
     }
 
-    OzoneOutputStream ozoneOutputStream =
-        bucket.createKey(key, 0, replicationType, replicationFactor,
-            new HashMap<>());
     // We pass null to FSDataOutputStream so it won't count writes that
     // are being buffered to a file
     return new FSDataOutputStream(
-        new OzoneFSOutputStream(ozoneOutputStream.getOutputStream()), null);
+        adapter.createKey(key), null);
   }
 
   @Override
@@ -236,7 +233,7 @@ public FSDataOutputStream createNonRecursive(Path path,
 
   @Override
   public FSDataOutputStream append(Path f, int bufferSize,
       Progressable progress) throws IOException {
     throw new UnsupportedOperationException("append() Not implemented by the "
         + getClass().getSimpleName() + " FileSystem implementation");
   }
@@ -256,7 +253,7 @@ private class RenameIterator extends OzoneListingIterator {
     @Override
     boolean processKey(String key) throws IOException {
       String newKeyName = dstKey.concat(key.substring(srcKey.length()));
-      bucket.renameKey(key, newKeyName);
+      adapter.renameKey(key, newKeyName);
       return true;
     }
   }
@@ -264,7 +261,7 @@ boolean processKey(String key) throws IOException {
   /**
    * Check whether the source and destination path are valid and then perform
    * rename from source path to destination path.
-   *
+   * <p>
    * The rename operation is performed by renaming the keys with src as prefix.
    * For such keys the prefix is changed from src to dst.
    *
@@ -361,6 +358,7 @@ public boolean rename(Path src, Path dst) throws IOException {
 
   private class DeleteIterator extends OzoneListingIterator {
     private boolean recursive;
 
     DeleteIterator(Path f, boolean recursive)
         throws IOException {
       super(f);
@@ -379,7 +377,7 @@ boolean processKey(String key) throws IOException {
         return true;
       } else {
         LOG.trace("deleting key:" + key);
-        boolean succeed = deleteObject(key);
+        boolean succeed = adapter.deleteObject(key);
         // if recursive delete is requested ignore the return value of
         // deleteObject and issue deletes for other keys.
         return recursive || succeed;
@@ -390,6 +388,7 @@ boolean processKey(String key) throws IOException {
   /**
    * Deletes the children of the input dir path by iterating though the
    * DeleteIterator.
+   *
    * @param f directory path to be deleted
    * @return true if successfully deletes all required keys, false otherwise
    * @throws IOException
@@ -431,7 +430,7 @@ public boolean delete(Path f, boolean recursive) throws IOException {
       result = innerDelete(f, recursive);
     } else {
       LOG.debug("delete: Path is a file: {}", f);
-      result = deleteObject(key);
+      result = adapter.deleteObject(key);
     }
 
     if (result) {
@@ -449,6 +448,7 @@ public boolean delete(Path f, boolean recursive) throws IOException {
   /**
    * Create a fake parent directory key if it does not already exist and no
    * other child of this parent directory exists.
+   *
    * @param f path to the fake parent directory
    * @throws IOException
   */
@@ -457,12 +457,13 @@ private void createFakeDirectoryIfNecessary(Path f) throws IOException {
     if (!key.isEmpty() && !o3Exists(f)) {
       LOG.debug("Creating new fake directory at {}", f);
       String dirKey = addTrailingSlashIfNeeded(key);
-      createDirectory(dirKey);
+      adapter.createDirectory(dirKey);
     }
   }
 
   /**
    * Check if a file or directory exists corresponding to given path.
+   *
    * @param f path to file/directory.
    * @return true if it exists, false otherwise.
    * @throws IOException
@@ -487,7 +488,7 @@ private class ListStatusIterator extends OzoneListingIterator {
         new HashMap<>(LISTING_PAGE_SIZE);
     private Path f; // the input path
 
     ListStatusIterator(Path f) throws IOException {
       super(f);
       this.f = f;
     }
@@ -495,6 +496,7 @@ private class ListStatusIterator extends OzoneListingIterator {
     /**
      * Add the key to the listStatus result if the key corresponds to the
      * input path or is an immediate child of the input path.
+     *
      * @param key key to be processed
      * @return always returns true
      * @throws IOException
@@ -518,7 +520,7 @@ boolean processKey(String key) throws IOException {
       if (pathToKey(keyPath.getParent()).equals(pathToKey(f))) {
         // This key is an immediate child. Can be file or directory
         if (key.endsWith(OZONE_URI_DELIMITER)) {
           // Key is a directory
           addSubDirStatus(keyPath);
         } else {
           addFileStatus(keyPath);
@@ -537,6 +539,7 @@ boolean processKey(String key) throws IOException {
 
     /**
      * Adds the FileStatus of keyPath to final result of listStatus.
+     *
      * @param filePath path to the file
      * @throws FileNotFoundException
      */
@@ -547,6 +550,7 @@ void addFileStatus(Path filePath) throws IOException {
     /**
      * Adds the FileStatus of the subdir to final result of listStatus, if not
      * already included.
+     *
      * @param dirPath path to the dir
      * @throws FileNotFoundException
      */
@@ -560,9 +564,9 @@ void addSubDirStatus(Path dirPath) throws FileNotFoundException {
     /**
      * Traverse the parent directory structure of keyPath to determine the
      * which parent/ grand-parent/.. is the immediate child of the input path f.
+     *
      * @param keyPath path whose parent directory structure should be traversed.
      * @return immediate child path of the input path f.
-     * @return immediate child path of the input path f.
      */
     Path getImmediateChildPath(Path keyPath) {
       Path path = keyPath;
@@ -610,6 +614,7 @@ public Path getWorkingDirectory() {
 
   /**
    * Get the username of the FS.
+   *
    * @return the short name of the user who instantiated the FS
    */
   public String getUsername() {
@@ -648,7 +653,7 @@ private boolean mkdir(Path path) throws IOException {
       LOG.trace("creating directory for fpart:{}", fPart);
       String key = pathToKey(fPart);
       String dirKey = addTrailingSlashIfNeeded(key);
-      if (!createDirectory(dirKey)) {
+      if (!adapter.createDirectory(dirKey)) {
         // Directory creation failed here,
         // rollback and delete newly created directories
         LOG.trace("Directory creation failed, path:{}", fPart);
@@ -682,11 +687,11 @@ public FileStatus getFileStatus(Path f) throws IOException {
 
     if (key.length() == 0) {
       return new FileStatus(0, true, 1, 0,
-          bucket.getCreationTime(), qualifiedPath);
+          adapter.getCreationTime(), qualifiedPath);
     }
 
     // Check if the key exists
-    OzoneKey ozoneKey = getKeyInfo(key);
+    BasicKeyInfo ozoneKey = adapter.getKeyInfo(key);
     if (ozoneKey != null) {
       LOG.debug("Found exact file for path {}: normal file", f);
       return new FileStatus(ozoneKey.getDataSize(), false, 1,
@@ -702,6 +707,7 @@ public FileStatus getFileStatus(Path f) throws IOException {
    * Get the FileStatus for input directory path.
    * They key corresponding to input path is appended with a trailing slash
    * to return only the corresponding directory key in the bucket.
+   *
    * @param f directory path
    * @return FileStatus for the input directory path
    * @throws FileNotFoundException
@@ -712,9 +718,9 @@ public FileStatus innerGetFileStatusForDir(Path f)
     String key = pathToKey(qualifiedPath);
     key = addTrailingSlashIfNeeded(key);
 
-    OzoneKey ozoneKey = getKeyInfo(key);
-    if(ozoneKey != null) {
-      if (isDirectory(ozoneKey)) {
+    BasicKeyInfo ozoneKey = adapter.getKeyInfo(key);
+    if (ozoneKey != null) {
+      if (adapter.isDirectory(ozoneKey)) {
         // Key is a directory
         LOG.debug("Found file (with /) for path {}: fake directory", f);
       } else {
@@ -730,7 +736,7 @@ public FileStatus innerGetFileStatusForDir(Path f)
 
     // File or directory corresponding to input path does not exist.
     // Check if there exists a key prefixed with this key.
-    boolean hasChildren = bucket.listKeys(key).hasNext();
+    boolean hasChildren = adapter.hasNextKey(key);
     if (hasChildren) {
       return new FileStatus(0, true, 1, 0, 0, 0, FsPermission.getDirDefault(),
           getUsername(), getUsername(), qualifiedPath);
@@ -739,65 +745,6 @@ public FileStatus innerGetFileStatusForDir(Path f)
     throw new FileNotFoundException(f + ": No such file or directory!");
   }
 
-  /**
-   * Helper method to fetch the key metadata info.
-   * @param key key whose metadata information needs to be fetched
-   * @return metadata info of the key
-   */
-  private OzoneKey getKeyInfo(String key) {
-    try {
-      return bucket.getKey(key);
-    } catch (IOException e) {
-      LOG.trace("Key:{} does not exist", key);
-      return null;
-    }
-  }
-
-  /**
-   * Helper method to check if an Ozone key is representing a directory.
-   * @param key key to be checked as a directory
-   * @return true if key is a directory, false otherwise
-   */
-  private boolean isDirectory(OzoneKey key) {
-    LOG.trace("key name:{} size:{}", key.getName(),
-        key.getDataSize());
-    return key.getName().endsWith(OZONE_URI_DELIMITER)
-        && (key.getDataSize() == 0);
-  }
-
-  /**
-   * Helper method to create an directory specified by key name in bucket.
-   * @param keyName key name to be created as directory
-   * @return true if the key is created, false otherwise
-   */
-  private boolean createDirectory(String keyName) {
-    try {
-      LOG.trace("creating dir for key:{}", keyName);
-      bucket.createKey(keyName, 0, replicationType, replicationFactor,
-          new HashMap<>()).close();
-      return true;
-    } catch (IOException ioe) {
-      LOG.error("create key failed for key:{}", keyName, ioe);
-      return false;
-    }
-  }
-
-  /**
-   * Helper method to delete an object specified by key name in bucket.
-   * @param keyName key name to be deleted
-   * @return true if the key is deleted, false otherwise
-   */
-  private boolean deleteObject(String keyName) {
-    LOG.trace("issuing delete for key" + keyName);
-    try {
-      bucket.deleteKey(keyName);
-      return true;
-    } catch (IOException ioe) {
-      LOG.error("delete key failed " + ioe.getMessage());
-      return false;
-    }
-  }
-
   /**
    * Turn a path (relative or otherwise) into an Ozone key.
    *
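The helper block removed above moves behind the adapter, but the convention it encoded is the part worth keeping in mind: a key stands for a "fake" directory exactly when its name ends with the URI delimiter and it holds zero bytes. A standalone sketch of that predicate follows; the KeyInfo class is an illustrative stand-in for the key-metadata type (OzoneKey before this patch, BasicKeyInfo after it):

```java
// Sketch of the directory convention from the removed isDirectory() helper:
// a key is a fake directory when its name ends with "/" and it is empty.
public final class DirectoryConventionSketch {

  private static final String OZONE_URI_DELIMITER = "/";

  // Illustrative stand-in for OzoneKey/BasicKeyInfo.
  static final class KeyInfo {
    private final String name;
    private final long dataSize;

    KeyInfo(String name, long dataSize) {
      this.name = name;
      this.dataSize = dataSize;
    }
  }

  static boolean isDirectoryKey(KeyInfo key) {
    return key.name.endsWith(OZONE_URI_DELIMITER) && key.dataSize == 0;
  }

  public static void main(String[] args) {
    System.out.println(isDirectoryKey(new KeyInfo("users/", 0)));       // true
    System.out.println(isDirectoryKey(new KeyInfo("users/a.txt", 42))); // false
  }
}
```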
@@ -805,7 +752,7 @@ private boolean deleteObject(String keyName) {
    * @return the key of the object that represents the file.
    */
   public String pathToKey(Path path) {
-    Objects.requireNonNull(path, "Path can not be null!");
+    Objects.requireNonNull(path, "Path canf not be null!");
     if (!path.isAbsolute()) {
       path = new Path(workingDir, path);
     }
@@ -839,17 +786,17 @@ public String toString() {
   }
 
   /**
    * This class provides an interface to iterate through all the keys in the
    * bucket prefixed with the input path key and process them.
-   *
+   * <p>
    * Each implementing class should define how the keys should be processed
    * through the processKey() function.
    */
   private abstract class OzoneListingIterator {
     private final Path path;
     private final FileStatus status;
     private String pathKey;
-    private Iterator<? extends OzoneKey> keyIterator;
+    private Iterator<BasicKeyInfo> keyIterator;
 
     OzoneListingIterator(Path path)
         throws IOException {
@@ -859,12 +806,13 @@ private abstract class OzoneListingIterator {
       if (status.isDirectory()) {
         this.pathKey = addTrailingSlashIfNeeded(pathKey);
       }
-      keyIterator = bucket.listKeys(pathKey);
+      keyIterator = adapter.listKeys(pathKey);
     }
 
     /**
      * The output of processKey determines if further iteration through the
      * keys should be done or not.
+     *
      * @return true if we should continue iteration of keys, false otherwise.
      * @throws IOException
      */
@@ -876,6 +824,7 @@ private abstract class OzoneListingIterator {
      * If for any key, the processKey() returns false, then the iteration is
      * stopped and returned with false indicating that all the keys could not
      * be processed successfully.
+     *
      * @return true if all keys are processed successfully, false otherwise.
      * @throws IOException
      */
@@ -884,7 +833,7 @@ boolean iterate() throws IOException {
       if (status.isDirectory()) {
         LOG.trace("Iterating directory:{}", pathKey);
         while (keyIterator.hasNext()) {
-          OzoneKey key = keyIterator.next();
+          BasicKeyInfo key = keyIterator.next();
           LOG.trace("iterating key:{}", key.getName());
           if (!processKey(key.getName())) {
             return false;
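Taken together, the adapter.* call sites in this file outline the surface the new OzoneClientAdapter must provide. The interface below is reconstructed from those call sites only; the method set and signatures are inferred, not quoted from the patch, and BasicKeyInfo is stubbed with just the accessors the file uses:

```java
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Iterator;

// Inferred adapter surface; a best-guess sketch, not the actual
// OzoneClientAdapter source from the patch.
interface OzoneClientAdapterSketch {

  // Minimal stand-in for the BasicKeyInfo type used by the call sites.
  interface BasicKeyInfo {
    String getName();
    long getDataSize();
  }

  InputStream createInputStream(String key) throws IOException;  // open()
  OutputStream createKey(String key) throws IOException;         // create()
  void renameKey(String key, String newKeyName) throws IOException;
  boolean createDirectory(String keyName);                       // mkdir()
  boolean deleteObject(String keyName);                          // delete()
  long getCreationTime();                     // bucket creation time
  boolean hasNextKey(String key);             // any key with this prefix?
  BasicKeyInfo getKeyInfo(String key);        // null when the key is absent
  boolean isDirectory(BasicKeyInfo key);
  Iterator<BasicKeyInfo> listKeys(String pathKey);
  void close() throws IOException;
}
```

Routing every bucket operation through one interface is what lets the isolated mode load the Ozone client classes through a separate classloader without touching the FileSystem logic.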
@@ -11,11 +11,7 @@
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-http://maven.apache.org/xsd/maven-4.0.0.xsd">
+--><project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
@@ -43,8 +39,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <module>common</module>
     <module>client</module>
     <module>ozone-manager</module>
-    <module>tools</module>
     <module>ozonefs</module>
+    <module>ozonefs-lib</module>
+    <module>ozonefs-lib-legacy</module>
+    <module>tools</module>
     <module>integration-test</module>
     <module>objectstore-service</module>
     <module>datanode</module>
@@ -102,6 +100,16 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>hadoop-ozone-filesystem</artifactId>
       <version>${ozone.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-filesystem-lib</artifactId>
+      <version>${ozone.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-filesystem-lib-legacy</artifactId>
+      <version>${ozone.version}</version>
+    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-ozone-integration-test</artifactId>
@@ -207,6 +215,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-tools</artifactId>
     </dependency>
+
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
@@ -304,7 +313,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>findbugs-maven-plugin</artifactId>
         <configuration>
-          <excludeFilterFile combine.self="override"></excludeFilterFile>
+          <excludeFilterFile combine.self="override"/>
         </configuration>
       </plugin>
       <plugin>