HADOOP-19085. Compatibility Benchmark over HCFS Implementations
Contributed by Han Liu
parent 783cc3eda0
commit 4d88f9892a
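
This commit adds a compatibility benchmark that exercises an HCFS implementation through shell test cases and Java API probes. As a quick orientation, a minimal invocation sketch following the usage string defined in HdfsCompatTool below; the URI and output path are placeholders, not values from this commit:

    # -uri selects the target FileSystem (required); -suite optionally narrows
    # the run (e.g. 'shell'); -output writes a detailed report to a local file.
    hadoop jar hadoop-compat-bench-3.5.0-SNAPSHOT.jar \
        -uri hdfs://localhost:8020/ -suite shell -output /tmp/compat-report.txt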
118
hadoop-tools/hadoop-compat-bench/pom.xml
Normal file
@@ -0,0 +1,118 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
    <version>3.5.0-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
  </parent>
  <artifactId>hadoop-compat-bench</artifactId>
  <version>3.5.0-SNAPSHOT</version>
  <packaging>jar</packaging>

  <description>Apache Hadoop Compatibility</description>
  <name>Apache Hadoop Compatibility Benchmark</name>

  <dependencies>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-common</artifactId>
      <scope>provided</scope>
    </dependency>
    <dependency>
      <!-- Should we keep this -->
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-hdfs</artifactId>
      <scope>provided</scope>
    </dependency>
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <scope>compile</scope>
    </dependency>

    <!-- For test -->
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-hdfs-client</artifactId>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-common</artifactId>
      <type>test-jar</type>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-hdfs</artifactId>
      <type>test-jar</type>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.mockito</groupId>
      <artifactId>mockito-core</artifactId>
      <scope>test</scope>
    </dependency>
  </dependencies>

  <build>
    <plugins>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-jar-plugin</artifactId>
        <configuration>
          <archive>
            <manifest>
              <mainClass>org.apache.hadoop.fs.compat.HdfsCompatTool</mainClass>
            </manifest>
          </archive>
        </configuration>
        <executions>
          <execution>
            <goals>
              <goal>test-jar</goal>
            </goals>
            <configuration>
              <archive>
                <manifest>
                  <mainClass>org.apache.hadoop.fs.compat.hdfs.HdfsCompatMiniCluster</mainClass>
                </manifest>
              </archive>
            </configuration>
          </execution>
        </executions>
      </plugin>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-surefire-plugin</artifactId>
        <configuration>
          <forkedProcessTimeoutInSeconds>3600</forkedProcessTimeoutInSeconds>
        </configuration>
      </plugin>
    </plugins>
    <resources>
      <resource>
        <directory>src/main/resources</directory>
      </resource>
      <resource>
        <directory>shell</directory>
      </resource>
    </resources>
  </build>
</project>
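The jar plugin configuration above makes HdfsCompatTool the entry point of the main jar and HdfsCompatMiniCluster the entry point of the test jar. A build sketch, assuming a standard Hadoop development environment (the flags are plain Maven, not specific to this commit):

    # Build just this module (plus its reactor dependencies) from the source root.
    mvn package -pl hadoop-tools/hadoop-compat-bench -am -DskipTests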
58
hadoop-tools/hadoop-compat-bench/shell/cases/attr.t
Normal file
@@ -0,0 +1,58 @@
#!/bin/sh

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

. $(dirname "$0")/../misc.sh

echo "Hello World!" > "${localDir}/dat"
hadoop fs -put "${localDir}/dat" "${baseDir}/"

echo "1..10"

# 1. chown
hadoop fs -chown "hadoop-compat-bench-user" "${baseDir}/dat"
expect_out "chown" "user:hadoop-compat-bench-user" hadoop fs -stat "user:%u" "${baseDir}/dat"

# 2. chgrp
hadoop fs -chgrp "hadoop-compat-bench-group" "${baseDir}/dat"
expect_out "chgrp" "group:hadoop-compat-bench-group" hadoop fs -stat "group:%g" "${baseDir}/dat"

# 3. chmod
hadoop fs -chmod 777 "${baseDir}/dat"
expect_out "chmod" "perm:777" hadoop fs -stat "perm:%a" "${baseDir}/dat"

# 4. touch
hadoop fs -touch -m -t "20000615:000000" "${baseDir}/dat"
expect_out "touch" "date:2000-06-.*" hadoop fs -stat "date:%y" "${baseDir}/dat"

# 5. setfattr
expect_ret "setfattr" 0 hadoop fs -setfattr -n "user.key" -v "value" "${baseDir}/dat"

# 6. getfattr
expect_out "getfattr" ".*value.*" hadoop fs -getfattr -n "user.key" "${baseDir}/dat"

# 7. setfacl
expect_ret "setfacl" 0 hadoop fs -setfacl -m "user:foo:---" "${baseDir}/dat"

# 8. getfacl
expect_out "getfacl" ".*foo.*" hadoop fs -getfacl "${baseDir}/dat"

# 9. setrep
hadoop fs -setrep 1 "${baseDir}/dat"
expect_out "setrep" "replication:1" hadoop fs -stat "replication:%r" "${baseDir}/dat"

# 10. checksum
expect_ret "checksum" 0 hadoop fs -checksum "${baseDir}/dat" # TODO
36
hadoop-tools/hadoop-compat-bench/shell/cases/concat.t
Normal file
@@ -0,0 +1,36 @@
#!/bin/sh

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

. $(dirname "$0")/../misc.sh

echo "Hello World!" > "${localDir}/dat"
hadoop fs -put "${localDir}/dat" "${baseDir}/src1"
hadoop fs -put "${localDir}/dat" "${baseDir}/src2"

echo "1..3"

# 1. touchz
hadoop fs -touchz "${baseDir}/dat"
expect_out "touchz" "size:0" hadoop fs -stat "size:%b" "${baseDir}/dat"

# 2. concat
expect_ret "concat" 0 hadoop fs -concat "${baseDir}/dat" "${baseDir}/src1" "${baseDir}/src2"
# expect_out "size:26" hadoop fs -stat "size:%b" "${baseDir}/dat"

# 3. getmerge
hadoop fs -getmerge "${baseDir}" "${localDir}/merged"
expect_ret "getmerge" 0 test -s "${localDir}/merged"
33
hadoop-tools/hadoop-compat-bench/shell/cases/copy.t
Normal file
@@ -0,0 +1,33 @@
#!/bin/sh

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

. $(dirname "$0")/../misc.sh

echo "Hello World!" > "${localDir}/dat"

echo "1..3"

# 1. copyFromLocal
expect_ret "copyFromLocal" 0 hadoop fs -copyFromLocal "${localDir}/dat" "${baseDir}/"

# 2. cp
hadoop fs -cp "${baseDir}/dat" "${baseDir}/dat2"
expect_ret "cp" 0 hadoop fs -test -f "${baseDir}/dat2"

# 3. copyToLocal
hadoop fs -copyToLocal "${baseDir}/dat2" "${localDir}/"
expect_ret "copyToLocal" 0 test -f "${localDir}/dat2"
47
hadoop-tools/hadoop-compat-bench/shell/cases/directory.t
Normal file
@@ -0,0 +1,47 @@
#!/bin/sh

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

. $(dirname "$0")/../misc.sh

echo "Hello World!" > "${localDir}/dat"
hadoop fs -put "${localDir}/dat" "${baseDir}/"

echo "1..8"

# 1. mkdir
expect_ret "mkdir" 0 hadoop fs -mkdir -p "${baseDir}/dir/sub"

# 2. ls
expect_lines "ls" 2 ".*dat.*" ".*dir.*" hadoop fs -ls "${baseDir}"

# 3. lsr
expect_lines "lsr" 3 ".*dat.*" ".*dir.*" ".*sub.*" hadoop fs -lsr "${baseDir}"

# 4. count
expect_out "count" ".*13.*" hadoop fs -count "${baseDir}"

# 5. du
expect_out "du" ".*13.*" hadoop fs -du "${baseDir}"

# 6. dus
expect_out "dus" ".*13.*" hadoop fs -dus "${baseDir}"

# 7. df
expect_ret "df" 0 hadoop fs -df "${baseDir}"

# 8. find
expect_out "find" ".*dat.*" hadoop fs -find "${baseDir}" -name "dat" -print
29
hadoop-tools/hadoop-compat-bench/shell/cases/fileinfo.t
Normal file
@@ -0,0 +1,29 @@
#!/bin/sh

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

. $(dirname "$0")/../misc.sh

echo "Hello World!" > "${localDir}/dat"
hadoop fs -put "${localDir}/dat" "${baseDir}/"

echo "1..2"

# 1. stat
expect_out "stat" "size:13" hadoop fs -stat "size:%b" "${baseDir}/dat"

# 2. test
expect_ret "test" 0 hadoop fs -test -f "${baseDir}/dat"
33
hadoop-tools/hadoop-compat-bench/shell/cases/move.t
Normal file
@@ -0,0 +1,33 @@
#!/bin/sh

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

. $(dirname "$0")/../misc.sh

echo "Hello World!" > "${localDir}/dat"

echo "1..2"

# 1. moveFromLocal
expect_ret "moveFromLocal" 0 hadoop fs -moveFromLocal "${localDir}/dat" "${baseDir}/"

# 2. mv
hadoop fs -mv "${baseDir}/dat" "${baseDir}/dat2"
expect_ret "mv" 0 hadoop fs -test -f "${baseDir}/dat2"

# moveToLocal is not implemented on HDFS
# hadoop fs -moveToLocal "${baseDir}/dat2" "${localDir}/"
# expect_ret "moveToLocal" 0 test -f "${localDir}/dat2"
39
hadoop-tools/hadoop-compat-bench/shell/cases/read.t
Normal file
@@ -0,0 +1,39 @@
#!/bin/sh

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

. $(dirname "$0")/../misc.sh

echo "Hello World!" > "${localDir}/dat"
hadoop fs -put "${localDir}/dat" "${baseDir}/"

echo "1..5"

# 1. get
hadoop fs -get "${baseDir}/dat" "${localDir}/"
expect_ret "get" 0 test -f "${localDir}/dat"

# 2. cat
expect_out "cat" "Hello World!" hadoop fs -cat "${baseDir}/dat"

# 3. text
expect_out "text" "Hello World!" hadoop fs -text "${baseDir}/dat"

# 4. head
expect_out "head" "Hello World!" hadoop fs -head "${baseDir}/dat"

# 5. tail
expect_out "tail" "Hello World!" hadoop fs -tail "${baseDir}/dat"
40
hadoop-tools/hadoop-compat-bench/shell/cases/remove.t
Normal file
@@ -0,0 +1,40 @@
#!/bin/sh

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

. $(dirname "$0")/../misc.sh

echo "Hello World!" > "${localDir}/dat"
hadoop fs -mkdir -p "${baseDir}/dir/sub"
hadoop fs -put "${localDir}/dat" "${baseDir}/dir/"
hadoop fs -put "${localDir}/dat" "${baseDir}/dir/sub/"

echo "1..4"

# 1. rm
hadoop fs -rm -f -skipTrash "${baseDir}/dir/dat"
expect_ret "rm" 1 hadoop fs -test -e "${baseDir}/dir/dat"

# 2. rmr
hadoop fs -rmr "${baseDir}/dir/sub"
expect_ret "rmr" 1 hadoop fs -test -e "${baseDir}/dir/sub"

# 3. rmdir
hadoop fs -rmdir "${baseDir}/dir"
expect_ret "rmdir" 1 hadoop fs -test -e "${baseDir}/dir"

# 4. expunge
expect_ret "expunge" 0 hadoop fs -expunge -immediate -fs "${baseDir}"
29
hadoop-tools/hadoop-compat-bench/shell/cases/snapshot.t
Normal file
@@ -0,0 +1,29 @@
#!/bin/sh

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

. $(dirname "$0")/../misc.sh

echo "1..3"

# 1. createSnapshot
expect_out "createSnapshot" "Created snapshot .*" hdfs dfs -createSnapshot "${snapshotDir}" "s-name"

# 2. renameSnapshot
expect_ret "renameSnapshot" 0 hdfs dfs -renameSnapshot "${snapshotDir}" "s-name" "d-name"

# 3. deleteSnapshot
expect_ret "deleteSnapshot" 0 hdfs dfs -deleteSnapshot "${snapshotDir}" "d-name"
38
hadoop-tools/hadoop-compat-bench/shell/cases/storagePolicy.t
Normal file
@@ -0,0 +1,38 @@
#!/bin/sh

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

. $(dirname "$0")/../misc.sh

echo "Hello World!" > "${localDir}/dat"
hadoop fs -put "${localDir}/dat" "${baseDir}/"

echo "1..5"

# 1. listPolicies
expect_ret "listPolicies" 0 hdfs storagepolicies -Dfs.defaultFS="${baseDir}" -listPolicies

# 2. setStoragePolicy
expect_out "setStoragePolicy" "Set storage policy ${storagePolicy} .*" hdfs storagepolicies -setStoragePolicy -path "${baseDir}" -policy "${storagePolicy}"

# 3. getStoragePolicy
expect_out "getStoragePolicy" ".*${storagePolicy}.*" hdfs storagepolicies -getStoragePolicy -path "${baseDir}"

# 4. satisfyStoragePolicy
expect_out "satisfyStoragePolicy" "Scheduled blocks to move .*" hdfs storagepolicies -satisfyStoragePolicy -path "${baseDir}"

# 5. unsetStoragePolicy
expect_out "unsetStoragePolicy" "Unset storage policy .*" hdfs storagepolicies -unsetStoragePolicy -path "${baseDir}"
31
hadoop-tools/hadoop-compat-bench/shell/cases/write.t
Normal file
@@ -0,0 +1,31 @@
#!/bin/sh

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

. $(dirname "$0")/../misc.sh

echo "Hello World!" > "${localDir}/dat"

echo "1..3"

# 1. put
expect_ret "put" 0 hadoop fs -put "${localDir}/dat" "${baseDir}/"

# 2. appendToFile
expect_ret "appendToFile" 0 hadoop fs -appendToFile "${localDir}/dat" "${baseDir}/dat"

# 3. truncate
expect_ret "truncate" 0 hadoop fs -truncate 13 "${baseDir}/dat"
181
hadoop-tools/hadoop-compat-bench/shell/misc.sh
Normal file
@@ -0,0 +1,181 @@
#!/bin/sh

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

ntest=1
fname="$0"

prepare() {
  BASE_URI="${HADOOP_COMPAT_BASE_URI}"
  LOCAL_URI="${HADOOP_COMPAT_LOCAL_URI}"
  SNAPSHOT_URI="${HADOOP_COMPAT_SNAPSHOT_URI}"
  STORAGE_POLICY="${HADOOP_COMPAT_STORAGE_POLICY}"
  STDOUT_DIR="${HADOOP_COMPAT_STDOUT_DIR}"
  PASS_FILE="${HADOOP_COMPAT_PASS_FILE}"
  FAIL_FILE="${HADOOP_COMPAT_FAIL_FILE}"
  SKIP_FILE="${HADOOP_COMPAT_SKIP_FILE}"

  export baseDir="${BASE_URI}/${fname}"
  export localDir="${LOCAL_URI}/${fname}"
  export snapshotDir="${SNAPSHOT_URI}"
  export storagePolicy="${STORAGE_POLICY}"
  stdoutDir="${STDOUT_DIR}/${fname}/stdout"
  stderrDir="${STDOUT_DIR}/${fname}/stderr"
  mkdir -p "${stdoutDir}"
  mkdir -p "${stderrDir}"
  mkdir -p "${localDir}"
  hadoop fs -mkdir -p "${baseDir}"
}

expect_ret() { (
  cname="${1}"
  shift
  expect="${1}"
  shift

  stdout="${stdoutDir}/${ntest}"
  stderr="${stderrDir}/${ntest}"
  "$@" 1>"${stdout}" 2>"${stderr}"
  result="$?"

  if should_skip "${stderr}"; then
    skip_case "${cname}"
  else
    if [ X"${result}" = X"${expect}" ]; then
      pass_case "${cname}"
    else
      fail_case "${cname}"
    fi
  fi
)
  ntest=$((ntest + 1))
}

expect_out() { (
  cname="${1}"
  shift
  expect="${1}"
  shift

  stdout="${stdoutDir}/${ntest}"
  stderr="${stderrDir}/${ntest}"
  "$@" 1>"${stdout}" 2>"${stderr}"

  if should_skip "${stderr}"; then
    skip_case "${cname}"
  else
    if grep -Eq '^'"${expect}"'$' "${stdout}"; then
      pass_case "${cname}"
    else
      fail_case "${cname}"
    fi
  fi
)
  ntest=$((ntest + 1))
}

expect_lines() { (
  cname="${1}"
  shift
  lineNum="${1}"
  shift
  lines=$(expect_lines_parse "${lineNum}" "$@")
  shift "${lineNum}"

  stdout="${stdoutDir}/${ntest}"
  stderr="${stderrDir}/${ntest}"
  "$@" 1>"${stdout}" 2>"${stderr}"

  if should_skip "${stderr}"; then
    skip_case "${cname}"
  else
    lineCount="0"
    while read -r line; do
      case "${line}" in
        *"Found"*"items"*)
          continue
          ;;
      esac
      selectedLine=$(expect_lines_select "${lines}" "${lineCount}")
      if ! echo "${line}" | grep -Eq '^'"${selectedLine}"'$'; then
        lineCount="-1"
        break
      else
        lineCount=$((lineCount + 1))
        shift
      fi
    done <"${stdout}"
    if [ "${lineCount}" -eq "${lineNum}" ]; then
      pass_case "${cname}"
    else
      fail_case "${cname}"
    fi
  fi
)
  ntest=$((ntest + 1))
}

expect_lines_parse() {
  for _ in $(seq 1 "${1}"); do
    shift
    echo "${1}"
  done
}

expect_lines_select() {
  lineSelector="0"
  echo "${1}" | while read -r splittedLine; do
    if [ "${lineSelector}" -eq "${2}" ]; then
      echo "${splittedLine}"
      return
    fi
    lineSelector=$((lineSelector + 1))
  done
  echo ""
}

is_hadoop_shell() {
  if [ X"${1}" = X"hadoop" ] || [ X"${1}" = X"hdfs" ]; then
    return 0
  else
    return 1
  fi
}

should_skip() {
  if grep -q "Unknown command" "${1}" || grep -q "Illegal option" "${1}"; then
    return 0
  else
    return 1
  fi
}

pass_case() {
  echo "ok ${ntest}"
  echo "${fname} - #${ntest} ${1}" >> "${PASS_FILE}"
}

fail_case() {
  echo "not ok ${ntest}"
  echo "${fname} - #${ntest} ${1}" >> "${FAIL_FILE}"
}

skip_case() {
  echo "ok ${ntest}"
  echo "${fname} - #${ntest} ${1}" >> "${SKIP_FILE}"
}

prepare
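misc.sh runs prepare when it is sourced and exports baseDir and localDir, so each .t case only needs to emit a TAP plan ("1..N") and call the expect_* helpers: expect_ret compares the exit status, expect_out greps stdout against an anchored regex, and a case is skipped when stderr reports "Unknown command" or "Illegal option". A minimal sketch of an additional case in the same pattern (the file name and test points are illustrative, not part of this commit):

    #!/bin/sh
    # hypothetical shell/cases/example.t, following the bundled cases
    . $(dirname "$0")/../misc.sh

    echo "Hello World!" > "${localDir}/dat"

    echo "1..2"

    # 1. exit-status check: expect_ret <name> <expected-status> <command...>
    expect_ret "put" 0 hadoop fs -put "${localDir}/dat" "${baseDir}/"

    # 2. stdout check: expect_out <name> <regex> <command...>
    expect_out "cat" "Hello World!" hadoop fs -cat "${baseDir}/dat"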
@@ -0,0 +1,250 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.compat.common.HdfsCompatCommand;
import org.apache.hadoop.fs.compat.common.HdfsCompatIllegalArgumentException;
import org.apache.hadoop.fs.compat.common.HdfsCompatReport;
import org.apache.hadoop.fs.shell.CommandFormat;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.util.VersionInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tool for triggering a compatibility report
 * for a specific FileSystem implementation.
 */
public class HdfsCompatTool extends Configured implements Tool {
  private static final Logger LOG =
      LoggerFactory.getLogger(HdfsCompatTool.class);

  private static final String DESCRIPTION = "hadoop jar" +
      " hadoop-compat-bench-{version}.jar -uri <uri>" +
      " [-suite <suiteName>] [-output <outputFile>]:\n" +
      "\tTrigger a compatibility assessment" +
      " for a specific FileSystem implementation.\n" +
      "\tA compatibility report is generated after the command finished," +
      " showing how many interfaces/functions are implemented" +
      " and compatible with HDFS definition.\n" +
      "\t-uri is required to determine the target FileSystem.\n" +
      "\t-suite is optional for limiting the assessment to a subset." +
      " For example, 'shell' means only shell commands.\n" +
      "\t-output is optional for a detailed report," +
      " which should be a local file path if provided.";

  private final PrintStream out; // Stream for printing command output
  private final PrintStream err; // Stream for printing error
  private String uri = null;
  private String suite = null;
  private String output = null;

  public HdfsCompatTool(Configuration conf) {
    this(conf, System.out, System.err);
  }

  public HdfsCompatTool(Configuration conf, PrintStream out, PrintStream err) {
    super(conf);
    this.out = out;
    this.err = err;
  }

  @Override
  public int run(final String[] args) throws Exception {
    try {
      return UserGroupInformation.getCurrentUser().doAs(
          new PrivilegedExceptionAction<Integer>() {
            @Override
            public Integer run() {
              return runImpl(args);
            }
          });
    } catch (InterruptedException e) {
      throw new IOException(e);
    }
  }

  /**
   * Main method that runs the tool for given arguments.
   *
   * @param args arguments
   * @return return status of the command
   */
  private int runImpl(String[] args) {
    if (isHelp(args)) {
      printUsage();
      return 0;
    }
    try {
      parseArgs(args);
      return doRun();
    } catch (Exception e) {
      printError(e.getMessage());
      return -1;
    }
  }

  private int doRun() throws Exception {
    HdfsCompatCommand cmd = new HdfsCompatCommand(uri, suite, getConf());
    cmd.initialize();
    HdfsCompatReport report = cmd.apply();
    OutputStream outputFile = null;
    try {
      if (this.output != null) {
        outputFile = new FileOutputStream(new File(this.output));
      }
    } catch (Exception e) {
      LOG.error("Create output file failed", e);
      outputFile = null;
    }
    try {
      printReport(report, outputFile);
    } finally {
      IOUtils.closeStream(outputFile);
    }
    return 0;
  }

  private boolean isHelp(String[] args) {
    if ((args == null) || (args.length == 0)) {
      return true;
    }
    return (args.length == 1) && (args[0].equalsIgnoreCase("-h") ||
        args[0].equalsIgnoreCase("--help"));
  }

  private void parseArgs(String[] args) {
    CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE);
    cf.addOptionWithValue("uri");
    cf.addOptionWithValue("suite");
    cf.addOptionWithValue("output");
    cf.parse(args, 0);
    this.uri = cf.getOptValue("uri");
    this.suite = cf.getOptValue("suite");
    this.output = cf.getOptValue("output");
    if (isEmpty(this.uri)) {
      throw new HdfsCompatIllegalArgumentException("-uri is not specified.");
    }
    if (isEmpty(this.suite)) {
      this.suite = "ALL";
    }
  }

  private boolean isEmpty(final String value) {
    return (value == null) || value.isEmpty();
  }

  private void printError(String message) {
    err.println(message);
  }

  private void printOut(String message) {
    out.println(message);
  }

  public void printReport(HdfsCompatReport report, OutputStream detailStream)
      throws IOException {
    StringBuilder buffer = new StringBuilder();

    // Line 1:
    buffer.append("Hadoop Compatibility Report for ");
    buffer.append(report.getSuite().getSuiteName());
    buffer.append(":\n");

    // Line 2:
    long passed = report.getPassedCase().size();
    long failed = report.getFailedCase().size();
    String percent = (failed == 0) ? "100" : String.format("%.2f",
        ((double) passed) / ((double) (passed + failed)) * 100);
    buffer.append("\t");
    buffer.append(percent);
    buffer.append("%, PASSED ");
    buffer.append(passed);
    buffer.append(" OVER ");
    buffer.append(passed + failed);
    buffer.append("\n");

    // Line 3:
    buffer.append("\tURI: ");
    buffer.append(report.getUri());
    if (report.getSuite() != null) {
      buffer.append(" (suite: ");
      buffer.append(report.getSuite().getClass().getName());
      buffer.append(")");
    }
    buffer.append("\n");

    // Line 4:
    buffer.append("\tHadoop Version as Baseline: ");
    buffer.append(VersionInfo.getVersion());

    final String shortMessage = buffer.toString();
    printOut(shortMessage);

    if (detailStream != null) {
      detailStream.write(shortMessage.getBytes(StandardCharsets.UTF_8));
      BufferedWriter writer = new BufferedWriter(
          new OutputStreamWriter(detailStream, StandardCharsets.UTF_8));
      writer.newLine();
      writer.write("PASSED CASES:");
      writer.newLine();
      Collection<String> cases = report.getPassedCase();
      for (String c : cases) {
        writer.write('\t');
        writer.write(c);
        writer.newLine();
        writer.flush();
      }
      writer.write("FAILED CASES:");
      writer.newLine();
      cases = report.getFailedCase();
      for (String c : cases) {
        writer.write('\t');
        writer.write(c);
        writer.newLine();
        writer.flush();
      }
      writer.flush();
    }
  }

  private void printUsage() {
    printError(DESCRIPTION);
  }

  public static void main(String[] args) throws Exception {
    int res = ToolRunner.run(new HdfsCompatTool(new Configuration()), args);
    System.exit(res);
  }
}
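Given printReport above, the console summary renders as four lines in this shape (a template derived from the code, not captured output):

    Hadoop Compatibility Report for <suiteName>:
        <percent>%, PASSED <passed> OVER <passed + failed>
        URI: <uri> (suite: <suite class name>)
        Hadoop Version as Baseline: <Hadoop version>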
@@ -0,0 +1,120 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.cases;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.compat.common.*;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.junit.Assert;

import java.io.IOException;
import java.util.List;

@HdfsCompatCaseGroup(name = "ACL")
public class HdfsCompatAcl extends AbstractHdfsCompatCase {
  private static final String INIT_FILE_ACL =
      "user::rwx,group::rwx,other::rwx,user:foo:rwx";
  private static final String INIT_DIR_ACL =
      "default:user::rwx,default:group::rwx,default:other::rwx";
  private Path dir;
  private Path file;

  @HdfsCompatCasePrepare
  public void prepare() throws IOException {
    this.dir = makePath("dir");
    this.file = new Path(this.dir, "file");
    HdfsCompatUtil.createFile(fs(), this.file, 0);
    List<AclEntry> entries = AclEntry.parseAclSpec(INIT_DIR_ACL, true);
    fs().setAcl(dir, entries);
    entries = AclEntry.parseAclSpec(INIT_FILE_ACL, true);
    fs().setAcl(file, entries);
  }

  @HdfsCompatCaseCleanup
  public void cleanup() throws IOException {
    HdfsCompatUtil.deleteQuietly(fs(), this.dir, true);
  }

  @HdfsCompatCase
  public void modifyAclEntries() throws IOException {
    List<AclEntry> entries = AclEntry.parseAclSpec("user:foo:---", true);
    fs().modifyAclEntries(file, entries);
    List<AclEntry> acls = fs().getAclStatus(file).getEntries();
    long count = 0;
    for (AclEntry acl : acls) {
      if ("foo".equals(acl.getName())) {
        ++count;
        Assert.assertEquals(FsAction.NONE, acl.getPermission());
      }
    }
    Assert.assertEquals(1, count);
  }

  @HdfsCompatCase
  public void removeAclEntries() throws IOException {
    List<AclEntry> entries = AclEntry.parseAclSpec("user:bar:---", true);
    fs().modifyAclEntries(file, entries);
    entries = AclEntry.parseAclSpec("user:foo:---", true);
    fs().removeAclEntries(file, entries);
    List<AclEntry> acls = fs().getAclStatus(file).getEntries();
    Assert.assertTrue(acls.stream().noneMatch(e -> "foo".equals(e.getName())));
    Assert.assertTrue(acls.stream().anyMatch(e -> "bar".equals(e.getName())));
  }

  @HdfsCompatCase
  public void removeDefaultAcl() throws IOException {
    fs().removeDefaultAcl(dir);
    List<AclEntry> acls = fs().getAclStatus(dir).getEntries();
    Assert.assertTrue(acls.stream().noneMatch(
        e -> (e.getScope() == AclEntryScope.DEFAULT)));
  }

  @HdfsCompatCase
  public void removeAcl() throws IOException {
    fs().removeAcl(file);
    List<AclEntry> acls = fs().getAclStatus(file).getEntries();
    Assert.assertTrue(acls.stream().noneMatch(e -> "foo".equals(e.getName())));
  }

  @HdfsCompatCase
  public void setAcl() throws IOException {
    List<AclEntry> acls = fs().getAclStatus(file).getEntries();
    Assert.assertTrue(acls.stream().anyMatch(e -> "foo".equals(e.getName())));
  }

  @HdfsCompatCase
  public void getAclStatus() throws IOException {
    AclStatus status = fs().getAclStatus(dir);
    Assert.assertFalse(status.getOwner().isEmpty());
    Assert.assertFalse(status.getGroup().isEmpty());
    List<AclEntry> acls = status.getEntries();
    Assert.assertTrue(acls.stream().anyMatch(e ->
        e.getScope() == AclEntryScope.DEFAULT));

    status = fs().getAclStatus(file);
    Assert.assertFalse(status.getOwner().isEmpty());
    Assert.assertFalse(status.getGroup().isEmpty());
    acls = status.getEntries();
    Assert.assertTrue(acls.stream().anyMatch(e ->
        e.getScope() == AclEntryScope.ACCESS));
  }
}
@@ -0,0 +1,733 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.cases;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.compat.common.*;
import org.apache.hadoop.fs.CommonPathCapabilities;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

import java.io.IOException;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

@HdfsCompatCaseGroup(name = "FileSystem")
public class HdfsCompatBasics extends AbstractHdfsCompatCase {
  @HdfsCompatCase
  public void initialize() throws IOException {
    FileSystem another = FileSystem.newInstance(fs().getUri(), fs().getConf());
    HdfsCompatUtil.checkImplementation(() ->
        another.initialize(URI.create("hdfs:///"), new Configuration())
    );
  }

  @HdfsCompatCase
  public void getScheme() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getScheme()
    );
  }

  @HdfsCompatCase
  public void getUri() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getUri()
    );
  }

  @HdfsCompatCase
  public void getCanonicalServiceName() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getCanonicalServiceName()
    );
  }

  @HdfsCompatCase
  public void getName() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getName()
    );
  }

  @HdfsCompatCase
  public void makeQualified() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().makeQualified(new Path("/"))
    );
  }

  @HdfsCompatCase
  public void getChildFileSystems() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getChildFileSystems()
    );
  }

  @HdfsCompatCase
  public void resolvePath() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().resolvePath(new Path("/"))
    );
  }

  @HdfsCompatCase
  public void getHomeDirectory() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getHomeDirectory()
    );
  }

  @HdfsCompatCase
  public void setWorkingDirectory() throws IOException {
    FileSystem another = FileSystem.newInstance(fs().getUri(), fs().getConf());
    HdfsCompatUtil.checkImplementation(() ->
        another.setWorkingDirectory(makePath("/tmp"))
    );
  }

  @HdfsCompatCase
  public void getWorkingDirectory() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getWorkingDirectory()
    );
  }

  @HdfsCompatCase
  public void close() throws IOException {
    FileSystem another = FileSystem.newInstance(fs().getUri(), fs().getConf());
    HdfsCompatUtil.checkImplementation(another::close);
  }

  @HdfsCompatCase
  public void getDefaultBlockSize() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getDefaultBlockSize(getBasePath())
    );
  }

  @HdfsCompatCase
  public void getDefaultReplication() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getDefaultReplication(getBasePath())
    );
  }

  @HdfsCompatCase
  public void getStorageStatistics() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getStorageStatistics()
    );
  }

  @HdfsCompatCase
  public void setVerifyChecksum() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().setVerifyChecksum(true)
    );
  }

  @HdfsCompatCase
  public void setWriteChecksum() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().setWriteChecksum(true)
    );
  }

  @HdfsCompatCase
  public void getDelegationToken() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getDelegationToken("hadoop")
    );
  }

  @HdfsCompatCase
  public void getAdditionalTokenIssuers() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getAdditionalTokenIssuers()
    );
  }

  @HdfsCompatCase
  public void getServerDefaults() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getServerDefaults(new Path("/"))
    );
  }

  @HdfsCompatCase
  public void msync() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().msync()
    );
  }

  @HdfsCompatCase
  public void getStatus() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getStatus(new Path("/"))
    );
  }

  @HdfsCompatCase
  public void getTrashRoot() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getTrashRoot(new Path("/user/hadoop/tmp"))
    );
  }

  @HdfsCompatCase
  public void getTrashRoots() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getTrashRoots(true)
    );
  }

  @HdfsCompatCase
  public void getAllStoragePolicies() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getAllStoragePolicies()
    );
  }

  @HdfsCompatCase
  public void supportsSymlinks() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().supportsSymlinks()
    );
  }

  @HdfsCompatCase
  public void hasPathCapability() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().hasPathCapability(getBasePath(),
            CommonPathCapabilities.FS_TRUNCATE)
    );
  }

  @HdfsCompatCase
  public void mkdirs() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().mkdirs(makePath("mkdir"))
    );
  }

  @HdfsCompatCase
  public void getFileStatus() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getFileStatus(makePath("file"))
    );
  }

  @HdfsCompatCase
  public void exists() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().exists(makePath("file"))
    );
  }

  @HdfsCompatCase
  public void isDirectory() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().isDirectory(makePath("file"))
    );
  }

  @HdfsCompatCase
  public void isFile() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().isFile(makePath("file"))
    );
  }

  @HdfsCompatCase
  public void getLength() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getLength(makePath("file"))
    );
  }

  @HdfsCompatCase
  public void getBlockSize() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getBlockSize(makePath("file"))
    );
  }

  @HdfsCompatCase
  public void listStatus() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().listStatus(makePath("dir"))
    );
  }

  @HdfsCompatCase
  public void globStatus() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().globStatus(makePath("dir"))
    );
  }

  @HdfsCompatCase
  public void listLocatedStatus() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().listLocatedStatus(makePath("dir"))
    );
  }

  @HdfsCompatCase
  public void listStatusIterator() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().listStatusIterator(makePath("dir"))
    );
  }

  @HdfsCompatCase
  public void listFiles() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().listFiles(makePath("dir"), false)
    );
  }

  @HdfsCompatCase
  public void rename() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().rename(makePath("src"), makePath("dst"))
    );
  }

  @HdfsCompatCase
  public void delete() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().delete(makePath("file"), true)
    );
  }

  @HdfsCompatCase
  public void deleteOnExit() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().deleteOnExit(makePath("file"))
    );
  }

  @HdfsCompatCase
  public void cancelDeleteOnExit() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().cancelDeleteOnExit(makePath("file"))
    );
  }

  @HdfsCompatCase
  public void truncate() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().truncate(makePath("file"), 1)
    );
  }

  @HdfsCompatCase
  public void setOwner() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().setOwner(makePath("file"), "test-user", "test-group")
    );
  }

  @HdfsCompatCase
  public void setTimes() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().setTimes(makePath("file"), 1696089600L, 1696089600L)
    );
  }

  @HdfsCompatCase
  public void concat() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().concat(makePath("file"),
            new Path[]{makePath("file1"), makePath("file2")})
    );
  }

  @HdfsCompatCase
  public void getFileChecksum() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getFileChecksum(makePath("file"))
    );
  }

  @HdfsCompatCase
  public void getFileBlockLocations() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getFileBlockLocations(new FileStatus(), 0, 128)
    );
  }

  @HdfsCompatCase
  public void listCorruptFileBlocks() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().listCorruptFileBlocks(makePath("file"))
    );
  }

  @HdfsCompatCase
  public void getReplication() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getReplication(makePath("file"))
    );
  }

  @HdfsCompatCase
  public void setReplication() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().setReplication(makePath("file"), (short) 2)
    );
  }

  @HdfsCompatCase
  public void getPathHandle() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getPathHandle(new FileStatus())
    );
  }

  @HdfsCompatCase
  public void create() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().create(makePath("file"), true)
    );
  }

  @HdfsCompatCase
  public void createNonRecursive() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().createNonRecursive(makePath("file"), true, 1024,
            (short) 1, 1048576, null)
    );
  }

  @HdfsCompatCase
  public void createNewFile() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().createNewFile(makePath("file"))
    );
  }

  @HdfsCompatCase
  public void append() throws IOException {
    final Path file = makePath("file");
    try {
      HdfsCompatUtil.createFile(fs(), file, 0);
      HdfsCompatUtil.checkImplementation(() ->
          fs().append(file)
      );
    } finally {
      HdfsCompatUtil.deleteQuietly(fs(), file, true);
    }
  }

  @HdfsCompatCase
  public void createFile() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().createFile(makePath("file"))
    );
  }

  @HdfsCompatCase
  public void appendFile() throws IOException {
    final Path file = makePath("file");
    try {
      HdfsCompatUtil.createFile(fs(), file, 0);
      HdfsCompatUtil.checkImplementation(() ->
          fs().appendFile(file)
      );
    } finally {
      HdfsCompatUtil.deleteQuietly(fs(), file, true);
    }
  }

  @HdfsCompatCase
  public void createMultipartUploader() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().createMultipartUploader(makePath("file"))
    );
  }

  @HdfsCompatCase
  public void open() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().open(makePath("file"))
    );
  }

  @HdfsCompatCase
  public void openFile() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().openFile(makePath("file"))
    );
  }

  @HdfsCompatCase
  public void getContentSummary() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getContentSummary(makePath("dir"))
    );
  }

  @HdfsCompatCase
  public void getUsed() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getUsed(makePath("dir"))
    );
  }

  @HdfsCompatCase
  public void getQuotaUsage() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getQuotaUsage(makePath("dir"))
    );
  }

  @HdfsCompatCase
  public void setQuota() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().setQuota(makePath("dir"), 1024L, 1048576L)
    );
  }

  @HdfsCompatCase
  public void setQuotaByStorageType() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().setQuotaByStorageType(makePath("dir"), StorageType.SSD, 1048576L)
    );
  }

  @HdfsCompatCase
  public void access() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().access(makePath("file"), FsAction.EXECUTE)
    );
  }

  @HdfsCompatCase
  public void setPermission() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().setPermission(makePath("file"), FsPermission.getDefault())
    );
  }

  @HdfsCompatCase
  public void createSymlink() {
    FileSystem.enableSymlinks();
    HdfsCompatUtil.checkImplementation(() ->
        fs().createSymlink(makePath("file"), makePath("link"), true)
    );
  }

  @HdfsCompatCase
  public void getFileLinkStatus() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getFileLinkStatus(makePath("file"))
    );
  }

  @HdfsCompatCase
  public void getLinkTarget() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getLinkTarget(makePath("link"))
    );
  }

  @HdfsCompatCase
  public void modifyAclEntries() {
    List<AclEntry> entries = AclEntry.parseAclSpec("user:foo:---", true);
    HdfsCompatUtil.checkImplementation(() ->
        fs().modifyAclEntries(makePath("modifyAclEntries"), entries)
    );
  }

  @HdfsCompatCase
  public void removeAclEntries() {
    List<AclEntry> entries = AclEntry.parseAclSpec("user:foo:---", true);
    HdfsCompatUtil.checkImplementation(() ->
        fs().removeAclEntries(makePath("removeAclEntries"), entries)
    );
  }

  @HdfsCompatCase
  public void removeDefaultAcl() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().removeDefaultAcl(makePath("removeDefaultAcl"))
    );
  }

  @HdfsCompatCase
  public void removeAcl() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().removeAcl(makePath("removeAcl"))
    );
  }

  @HdfsCompatCase
  public void setAcl() {
    List<AclEntry> entries = AclEntry.parseAclSpec("user:foo:---", true);
    HdfsCompatUtil.checkImplementation(() ->
        fs().setAcl(makePath("setAcl"), entries)
    );
  }

  @HdfsCompatCase
  public void getAclStatus() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getAclStatus(makePath("getAclStatus"))
    );
  }

  @HdfsCompatCase
  public void setXAttr() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().setXAttr(makePath("file"), "test-xattr",
            "test-value".getBytes(StandardCharsets.UTF_8))
    );
  }

  @HdfsCompatCase
  public void getXAttr() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getXAttr(makePath("file"), "test-xattr")
    );
  }

  @HdfsCompatCase
  public void getXAttrs() {
    List<String> names = new ArrayList<>();
    names.add("test-xattr");
    HdfsCompatUtil.checkImplementation(() ->
        fs().getXAttrs(makePath("file"), names)
    );
  }

  @HdfsCompatCase
  public void listXAttrs() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().listXAttrs(makePath("file"))
    );
  }
|
||||
@HdfsCompatCase
|
||||
public void removeXAttr() {
|
||||
HdfsCompatUtil.checkImplementation(() ->
|
||||
fs().removeXAttr(makePath("file"), "test-xattr")
|
||||
);
|
||||
}
|
||||
|
||||
@HdfsCompatCase
|
||||
public void setStoragePolicy() {
|
||||
HdfsCompatUtil.checkImplementation(() ->
|
||||
fs().setStoragePolicy(makePath("dir"), "COLD")
|
||||
);
|
||||
}
|
||||
|
||||
@HdfsCompatCase
|
||||
public void unsetStoragePolicy() {
|
||||
HdfsCompatUtil.checkImplementation(() ->
|
||||
fs().unsetStoragePolicy(makePath("dir"))
|
||||
);
|
||||
}
|
||||
|
||||
@HdfsCompatCase
|
||||
public void satisfyStoragePolicy() {
|
||||
HdfsCompatUtil.checkImplementation(() ->
|
||||
fs().satisfyStoragePolicy(makePath("dir"))
|
||||
);
|
||||
}
|
||||
|
||||
@HdfsCompatCase
|
||||
public void getStoragePolicy() {
|
||||
HdfsCompatUtil.checkImplementation(() ->
|
||||
fs().getStoragePolicy(makePath("dir"))
|
||||
);
|
||||
}
|
||||
|
||||
@HdfsCompatCase
|
||||
public void copyFromLocalFile() {
|
||||
HdfsCompatUtil.checkImplementation(() ->
|
||||
fs().copyFromLocalFile(makePath("src"), makePath("dst"))
|
||||
);
|
||||
}
|
||||
|
||||
@HdfsCompatCase
|
||||
public void moveFromLocalFile() {
|
||||
HdfsCompatUtil.checkImplementation(() ->
|
||||
fs().moveFromLocalFile(makePath("src"), makePath("dst"))
|
||||
);
|
||||
}
|
||||
|
||||
@HdfsCompatCase
|
||||
public void copyToLocalFile() {
|
||||
HdfsCompatUtil.checkImplementation(() ->
|
||||
fs().copyToLocalFile(makePath("src"), makePath("dst"))
|
||||
);
|
||||
}
|
||||
|
||||
@HdfsCompatCase
|
||||
public void moveToLocalFile() {
|
||||
HdfsCompatUtil.checkImplementation(() ->
|
||||
fs().moveToLocalFile(makePath("src"), makePath("dst"))
|
||||
);
|
||||
}
|
||||
|
||||
@HdfsCompatCase
|
||||
public void startLocalOutput() {
|
||||
HdfsCompatUtil.checkImplementation(() ->
|
||||
fs().startLocalOutput(makePath("out"), makePath("tmp"))
|
||||
);
|
||||
}
|
||||
|
||||
@HdfsCompatCase
|
||||
public void completeLocalOutput() {
|
||||
HdfsCompatUtil.checkImplementation(() ->
|
||||
fs().completeLocalOutput(makePath("out"), makePath("tmp"))
|
||||
);
|
||||
}
|
||||
|
||||
@HdfsCompatCase
|
||||
public void createSnapshot() {
|
||||
HdfsCompatUtil.checkImplementation(() ->
|
||||
fs().createSnapshot(makePath("file"), "s_name")
|
||||
);
|
||||
}
|
||||
|
||||
@HdfsCompatCase
|
||||
public void renameSnapshot() {
|
||||
HdfsCompatUtil.checkImplementation(() ->
|
||||
fs().renameSnapshot(makePath("file"), "s_name", "n_name")
|
||||
);
|
||||
}
|
||||
|
||||
@HdfsCompatCase
|
||||
public void deleteSnapshot() {
|
||||
HdfsCompatUtil.checkImplementation(() ->
|
||||
fs().deleteSnapshot(makePath("file"), "s_name")
|
||||
);
|
||||
}
|
||||
}
|
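The cases above only probe that each FileSystem API is wired up, not that it behaves correctly. A minimal sketch of the contract they rely on (hypothetical; the committed HdfsCompatUtil source is outside this excerpt):

    // Hypothetical sketch: a case counts as "implemented" unless the call
    // surfaces UnsupportedOperationException.
    public static void checkImplementation(ImplementationFunction func) {
      try {
        func.apply();
      } catch (UnsupportedOperationException e) {
        throw e;  // the target FileSystem does not implement this API
      } catch (Throwable ignored) {
        // other failures (missing paths, permissions) still prove
        // the method exists and is reachable
      }
    }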
@@ -0,0 +1,153 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.cases;

import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.compat.common.*;
import org.apache.hadoop.io.IOUtils;
import org.junit.Assert;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.CompletableFuture;

@HdfsCompatCaseGroup(name = "Create")
public class HdfsCompatCreate extends AbstractHdfsCompatCase {
  private Path path;

  @HdfsCompatCasePrepare
  public void prepare() {
    this.path = makePath("path");
  }

  @HdfsCompatCaseCleanup
  public void cleanup() {
    HdfsCompatUtil.deleteQuietly(fs(), this.path, true);
  }

  @HdfsCompatCase
  public void mkdirs() throws IOException {
    fs().mkdirs(path);
    Assert.assertTrue(fs().exists(path));
  }

  @HdfsCompatCase
  public void create() throws IOException {
    FSDataOutputStream out = null;
    try {
      out = fs().create(path, true);
      Assert.assertTrue(fs().exists(path));
    } finally {
      IOUtils.closeStream(out);
    }
  }

  @HdfsCompatCase
  public void createNonRecursive() {
    Path file = new Path(path, "file-no-parent");
    try {
      fs().createNonRecursive(file, true, 1024, (short) 1, 1048576, null);
      Assert.fail("Should fail since parent does not exist");
    } catch (IOException ignored) {
    }
  }

  @HdfsCompatCase
  public void createNewFile() throws IOException {
    HdfsCompatUtil.createFile(fs(), path, 0);
    Assert.assertFalse(fs().createNewFile(path));
  }

  @HdfsCompatCase
  public void append() throws IOException {
    HdfsCompatUtil.createFile(fs(), path, 128);
    FSDataOutputStream out = null;
    byte[] data = new byte[64];
    try {
      out = fs().append(path);
      out.write(data);
      out.close();
      out = null;
      FileStatus fileStatus = fs().getFileStatus(path);
      Assert.assertEquals(128 + 64, fileStatus.getLen());
    } finally {
      IOUtils.closeStream(out);
    }
  }

  @HdfsCompatCase
  public void createFile() throws IOException {
    FSDataOutputStream out = null;
    fs().mkdirs(path);
    final Path file = new Path(path, "file");
    try {
      FSDataOutputStreamBuilder builder = fs().createFile(file);
      out = builder.blockSize(1048576 * 2).build();
      out.write("Hello World!".getBytes(StandardCharsets.UTF_8));
      out.close();
      out = null;
      Assert.assertTrue(fs().exists(file));
    } finally {
      IOUtils.closeStream(out);
    }
  }

  @HdfsCompatCase
  public void appendFile() throws IOException {
    HdfsCompatUtil.createFile(fs(), path, 128);
    FSDataOutputStream out = null;
    byte[] data = new byte[64];
    try {
      FSDataOutputStreamBuilder builder = fs().appendFile(path);
      out = builder.build();
      out.write(data);
      out.close();
      out = null;
      FileStatus fileStatus = fs().getFileStatus(path);
      Assert.assertEquals(128 + 64, fileStatus.getLen());
    } finally {
      IOUtils.closeStream(out);
    }
  }

  @HdfsCompatCase
  public void createMultipartUploader() throws Exception {
    MultipartUploader mpu = null;
    UploadHandle handle = null;
    try {
      MultipartUploaderBuilder builder = fs().createMultipartUploader(path);
      final Path file = fs().makeQualified(new Path(path, "file"));
      mpu = builder.blockSize(1048576).build();
      CompletableFuture<UploadHandle> future = mpu.startUpload(file);
      handle = future.get();
    } finally {
      if (mpu != null) {
        if (handle != null) {
          try {
            mpu.abort(handle, path);
          } catch (Throwable ignored) {
          }
        }
        try {
          mpu.abortUploadsUnderPath(path);
        } catch (Throwable ignored) {
        }
      }
    }
  }
}
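For reference, the builder chain exercised by createFile()/appendFile() above can be written out against any FileSystem; a sketch, assuming an existing FileSystem fs and Path p (the names are illustrative):

    try (FSDataOutputStream out = fs.createFile(p)
        .overwrite(true)          // optional hints; defaults apply if omitted
        .blockSize(2 * 1048576)
        .build()) {
      out.write("Hello World!".getBytes(StandardCharsets.UTF_8));
    }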
@@ -0,0 +1,145 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.cases;

import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.compat.common.*;
import org.junit.Assert;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

@HdfsCompatCaseGroup(name = "Directory")
public class HdfsCompatDirectory extends AbstractHdfsCompatCase {
  private static final int FILE_LEN = 128;
  private Path dir = null;
  private Path file = null;

  @HdfsCompatCasePrepare
  public void prepare() throws IOException {
    this.dir = makePath("dir");
    this.file = new Path(this.dir, "file");
    HdfsCompatUtil.createFile(fs(), file, FILE_LEN);
  }

  @HdfsCompatCaseCleanup
  public void cleanup() throws IOException {
    HdfsCompatUtil.deleteQuietly(fs(), this.dir, true);
  }

  @HdfsCompatCase
  public void isDirectory() throws IOException {
    Assert.assertTrue(fs().isDirectory(dir));
  }

  @HdfsCompatCase
  public void listStatus() throws IOException {
    FileStatus[] files = fs().listStatus(dir);
    Assert.assertNotNull(files);
    Assert.assertEquals(1, files.length);
    Assert.assertEquals(file.getName(), files[0].getPath().getName());
  }

  @HdfsCompatCase
  public void globStatus() throws IOException {
    FileStatus[] files = fs().globStatus(new Path(dir, "*ile"));
    Assert.assertNotNull(files);
    Assert.assertEquals(1, files.length);
    Assert.assertEquals(file.getName(), files[0].getPath().getName());
  }

  @HdfsCompatCase
  public void listLocatedStatus() throws IOException {
    RemoteIterator<LocatedFileStatus> locatedFileStatuses =
        fs().listLocatedStatus(dir);
    Assert.assertNotNull(locatedFileStatuses);
    List<LocatedFileStatus> files = new ArrayList<>();
    while (locatedFileStatuses.hasNext()) {
      files.add(locatedFileStatuses.next());
    }
    Assert.assertEquals(1, files.size());
    LocatedFileStatus fileStatus = files.get(0);
    Assert.assertEquals(file.getName(), fileStatus.getPath().getName());
  }

  @HdfsCompatCase
  public void listStatusIterator() throws IOException {
    RemoteIterator<FileStatus> fileStatuses = fs().listStatusIterator(dir);
    Assert.assertNotNull(fileStatuses);
    List<FileStatus> files = new ArrayList<>();
    while (fileStatuses.hasNext()) {
      files.add(fileStatuses.next());
    }
    Assert.assertEquals(1, files.size());
    FileStatus fileStatus = files.get(0);
    Assert.assertEquals(file.getName(), fileStatus.getPath().getName());
  }

  @HdfsCompatCase
  public void listFiles() throws IOException {
    RemoteIterator<LocatedFileStatus> iter = fs().listFiles(dir, true);
    Assert.assertNotNull(iter);
    List<LocatedFileStatus> files = new ArrayList<>();
    while (iter.hasNext()) {
      files.add(iter.next());
    }
    Assert.assertEquals(1, files.size());
  }

  @HdfsCompatCase
  public void listCorruptFileBlocks() throws IOException {
    RemoteIterator<Path> iter = fs().listCorruptFileBlocks(dir);
    Assert.assertNotNull(iter);
    Assert.assertFalse(iter.hasNext()); // No corrupted file
  }

  @HdfsCompatCase
  public void getContentSummary() throws IOException {
    ContentSummary summary = fs().getContentSummary(dir);
    Assert.assertEquals(1, summary.getFileCount());
    Assert.assertEquals(1, summary.getDirectoryCount());
    Assert.assertEquals(FILE_LEN, summary.getLength());
  }

  @HdfsCompatCase
  public void getUsed() throws IOException {
    long used = fs().getUsed(dir);
    Assert.assertTrue(used >= FILE_LEN);
  }

  @HdfsCompatCase
  public void getQuotaUsage() throws IOException {
    QuotaUsage usage = fs().getQuotaUsage(dir);
    Assert.assertEquals(2, usage.getFileAndDirectoryCount());
  }

  @HdfsCompatCase
  public void setQuota() throws IOException {
    fs().setQuota(dir, 1048576L, 1073741824L);
    QuotaUsage usage = fs().getQuotaUsage(dir);
    Assert.assertEquals(1048576L, usage.getQuota());
  }

  @HdfsCompatCase
  public void setQuotaByStorageType() throws IOException {
    fs().setQuotaByStorageType(dir, StorageType.DISK, 1048576L);
    QuotaUsage usage = fs().getQuotaUsage(dir);
    Assert.assertEquals(1048576L, usage.getTypeQuota(StorageType.DISK));
  }
}
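The listLocatedStatus/listStatusIterator/listFiles cases all drain a RemoteIterator by hand; the pattern generalizes to a small helper (a sketch, not part of the committed sources):

    static <T> List<T> drain(RemoteIterator<T> it) throws IOException {
      List<T> result = new ArrayList<>();
      while (it.hasNext()) {   // hasNext()/next() may both throw IOException
        result.add(it.next());
      }
      return result;
    }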
@@ -0,0 +1,241 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.cases;

import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.compat.common.*;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.DataChecksum;
import org.junit.Assert;

import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.Random;

@HdfsCompatCaseGroup(name = "File")
public class HdfsCompatFile extends AbstractHdfsCompatCase {
  private static final int FILE_LEN = 128;
  private static final long BLOCK_SIZE = 1048576;
  private static final short REPLICATION = 1;
  private static final Random RANDOM = new Random();
  private Path file = null;

  @HdfsCompatCasePrepare
  public void prepare() throws IOException {
    this.file = makePath("file");
    HdfsCompatUtil.createFile(fs(), this.file, true,
        1024, FILE_LEN, BLOCK_SIZE, REPLICATION);
  }

  @HdfsCompatCaseCleanup
  public void cleanup() throws IOException {
    HdfsCompatUtil.deleteQuietly(fs(), this.file, true);
  }

  @HdfsCompatCase
  public void getFileStatus() throws IOException {
    FileStatus fileStatus = fs().getFileStatus(file);
    Assert.assertNotNull(fileStatus);
    Assert.assertEquals(file.getName(), fileStatus.getPath().getName());
  }

  @HdfsCompatCase
  public void exists() throws IOException {
    Assert.assertTrue(fs().exists(file));
  }

  @HdfsCompatCase
  public void isFile() throws IOException {
    Assert.assertTrue(fs().isFile(file));
  }

  @HdfsCompatCase
  public void getLength() throws IOException {
    Assert.assertEquals(FILE_LEN, fs().getLength(file));
  }

  @HdfsCompatCase(brief = "arbitrary blockSize")
  public void getBlockSize() throws IOException {
    Assert.assertEquals(BLOCK_SIZE, fs().getBlockSize(file));
  }

  @HdfsCompatCase
  public void renameFile() throws IOException {
    Path dst = new Path(file.toString() + "_rename_dst");
    fs().rename(file, dst);
    Assert.assertFalse(fs().exists(file));
    Assert.assertTrue(fs().exists(dst));
  }

  @HdfsCompatCase
  public void deleteFile() throws IOException {
    fs().delete(file, true);
    Assert.assertFalse(fs().exists(file));
  }

  @HdfsCompatCase
  public void deleteOnExit() throws IOException {
    FileSystem newFs = FileSystem.newInstance(fs().getUri(), fs().getConf());
    newFs.deleteOnExit(file);
    newFs.close();
    Assert.assertFalse(fs().exists(file));
  }

  @HdfsCompatCase
  public void cancelDeleteOnExit() throws IOException {
    FileSystem newFs = FileSystem.newInstance(fs().getUri(), fs().getConf());
    newFs.deleteOnExit(file);
    newFs.cancelDeleteOnExit(file);
    newFs.close();
    Assert.assertTrue(fs().exists(file));
  }

  @HdfsCompatCase
  public void truncate() throws IOException, InterruptedException {
    int newLen = RANDOM.nextInt(FILE_LEN);
    boolean finished = fs().truncate(file, newLen);
    while (!finished) {
      Thread.sleep(1000);
      finished = fs().truncate(file, newLen);
    }
    FileStatus fileStatus = fs().getFileStatus(file);
    Assert.assertEquals(newLen, fileStatus.getLen());
  }

  @HdfsCompatCase
  public void setOwner() throws Exception {
    final String owner = "test_" + RANDOM.nextInt(1024);
    final String group = "test_" + RANDOM.nextInt(1024);
    final String privileged = getPrivilegedUser();
    UserGroupInformation.createRemoteUser(privileged).doAs(
        (PrivilegedExceptionAction<Void>) () -> {
          FileSystem.newInstance(fs().getUri(), fs().getConf())
              .setOwner(file, owner, group);
          return null;
        }
    );
    FileStatus fileStatus = fs().getFileStatus(file);
    Assert.assertEquals(owner, fileStatus.getOwner());
    Assert.assertEquals(group, fileStatus.getGroup());
  }

  @HdfsCompatCase
  public void setTimes() throws IOException {
    final long atime = System.currentTimeMillis();
    final long mtime = atime - 1000;
    fs().setTimes(file, mtime, atime);
    FileStatus fileStatus = fs().getFileStatus(file);
    Assert.assertEquals(mtime, fileStatus.getModificationTime());
    Assert.assertEquals(atime, fileStatus.getAccessTime());
  }

  @HdfsCompatCase
  public void concat() throws IOException {
    final Path dir = makePath("dir");
    try {
      final Path src = new Path(dir, "src");
      final Path dst = new Path(dir, "dst");
      HdfsCompatUtil.createFile(fs(), src, 64);
      HdfsCompatUtil.createFile(fs(), dst, 16);
      fs().concat(dst, new Path[]{src});
      FileStatus fileStatus = fs().getFileStatus(dst);
      Assert.assertEquals(16 + 64, fileStatus.getLen());
    } finally {
      HdfsCompatUtil.deleteQuietly(fs(), dir, true);
    }
  }

  @HdfsCompatCase
  public void getFileChecksum() throws IOException {
    FileChecksum checksum = fs().getFileChecksum(file);
    Assert.assertNotNull(checksum);
    Assert.assertNotNull(checksum.getChecksumOpt());
    DataChecksum.Type type = checksum.getChecksumOpt().getChecksumType();
    Assert.assertNotEquals(DataChecksum.Type.NULL, type);
  }

  @HdfsCompatCase
  public void getFileBlockLocations() throws IOException {
    BlockLocation[] locations = fs().getFileBlockLocations(file, 0, FILE_LEN);
    Assert.assertTrue(locations.length >= 1);
    BlockLocation location = locations[0];
    Assert.assertTrue(location.getLength() > 0);
  }

  @HdfsCompatCase
  public void getReplication() throws IOException {
    Assert.assertEquals(REPLICATION, fs().getReplication(file));
  }

  @HdfsCompatCase(brief = "arbitrary replication")
  public void setReplication() throws IOException {
    fs().setReplication(this.file, (short) 2);
    Assert.assertEquals(2, fs().getReplication(this.file));
  }

  @HdfsCompatCase
  public void getPathHandle() throws IOException {
    FileStatus status = fs().getFileStatus(file);
    PathHandle handle = fs().getPathHandle(status, Options.HandleOpt.path());
    final int maxReadLen = Math.min(FILE_LEN, 4096);
    byte[] data = new byte[maxReadLen];
    try (FSDataInputStream in = fs().open(handle, 1024)) {
      in.readFully(data);
    }
  }

  @HdfsCompatCase
  public void open() throws IOException {
    FSDataInputStream in = null;
    try {
      in = fs().open(file);
      in.read();
    } finally {
      IOUtils.closeStream(in);
    }
  }

  @HdfsCompatCase
  public void openFile() throws Exception {
    FSDataInputStream in = null;
    try {
      FutureDataInputStreamBuilder builder = fs().openFile(file);
      in = builder.build().get();
    } finally {
      IOUtils.closeStream(in);
    }
  }

  @HdfsCompatCase
  public void access() throws IOException {
    fs().access(file, FsAction.READ);
  }

  @HdfsCompatCase
  public void setPermission() throws IOException {
    fs().setPermission(file, FsPermission.createImmutable((short) 511));
    try {
      fs().access(file, FsAction.ALL);
      Assert.fail("Should not have write permission");
    } catch (Throwable ignored) {
    }
  }
}
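Note that truncate() returns false while block recovery is still in progress, which is why the case above polls. Callers outside a test harness may prefer a bound on that loop; a sketch with an assumed 60-second deadline:

    long deadline = System.currentTimeMillis() + 60_000L;
    while (!fs.truncate(file, newLen)) {
      if (System.currentTimeMillis() > deadline) {
        throw new IOException("truncate did not complete in time");
      }
      Thread.sleep(1000);
    }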
@@ -0,0 +1,111 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.cases;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.compat.common.*;
import org.junit.Assert;

import java.io.IOException;
import java.util.Random;

@HdfsCompatCaseGroup(name = "Local")
public class HdfsCompatLocal extends AbstractHdfsCompatCase {
  private static final int FILE_LEN = 128;
  private static final Random RANDOM = new Random();
  private LocalFileSystem localFs;
  private Path localBasePath;
  private Path localSrc;
  private Path localDst;
  private Path src;
  private Path dst;

  @HdfsCompatCaseSetUp
  public void setUp() throws IOException {
    localFs = FileSystem.getLocal(fs().getConf());
    localBasePath = localFs.makeQualified(getLocalPath());
  }

  @HdfsCompatCaseTearDown
  public void tearDown() {
    HdfsCompatUtil.deleteQuietly(localFs, localBasePath, true);
  }

  @HdfsCompatCasePrepare
  public void prepare() throws IOException {
    final String unique = System.currentTimeMillis()
        + "_" + RANDOM.nextLong() + "/";
    this.localSrc = new Path(localBasePath, unique + "src");
    this.localDst = new Path(localBasePath, unique + "dst");
    this.src = new Path(getBasePath(), unique + "src");
    this.dst = new Path(getBasePath(), unique + "dst");
    HdfsCompatUtil.createFile(localFs, this.localSrc, FILE_LEN);
    HdfsCompatUtil.createFile(fs(), this.src, FILE_LEN);
  }

  @HdfsCompatCaseCleanup
  public void cleanup() {
    HdfsCompatUtil.deleteQuietly(fs(), this.src.getParent(), true);
    HdfsCompatUtil.deleteQuietly(localFs, this.localSrc.getParent(), true);
  }

  @HdfsCompatCase
  public void copyFromLocalFile() throws IOException {
    fs().copyFromLocalFile(localSrc, dst);
    Assert.assertTrue(localFs.exists(localSrc));
    Assert.assertTrue(fs().exists(dst));
  }

  @HdfsCompatCase
  public void moveFromLocalFile() throws IOException {
    fs().moveFromLocalFile(localSrc, dst);
    Assert.assertFalse(localFs.exists(localSrc));
    Assert.assertTrue(fs().exists(dst));
  }

  @HdfsCompatCase
  public void copyToLocalFile() throws IOException {
    fs().copyToLocalFile(src, localDst);
    Assert.assertTrue(fs().exists(src));
    Assert.assertTrue(localFs.exists(localDst));
  }

  @HdfsCompatCase
  public void moveToLocalFile() throws IOException {
    fs().moveToLocalFile(src, localDst);
    Assert.assertFalse(fs().exists(src));
    Assert.assertTrue(localFs.exists(localDst));
  }

  @HdfsCompatCase
  public void startLocalOutput() throws IOException {
    Path local = fs().startLocalOutput(dst, localDst);
    HdfsCompatUtil.createFile(localFs, local, 16);
    Assert.assertTrue(localFs.exists(local));
  }

  @HdfsCompatCase
  public void completeLocalOutput() throws IOException {
    Path local = fs().startLocalOutput(dst, localDst);
    HdfsCompatUtil.createFile(localFs, local, 16);
    fs().completeLocalOutput(dst, localDst);
    Assert.assertTrue(fs().exists(dst));
  }
}
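The copy/move pairs above differ only in whether the source survives. FileSystem also exposes an overload that makes the delete-source behavior explicit; an illustrative call (fs, localSrc and dst are assumed to exist):

    // delSrc=true deletes the local source, matching moveFromLocalFile();
    // overwrite=true replaces an existing destination.
    fs.copyFromLocalFile(true, true, localSrc, dst);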
@@ -0,0 +1,223 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.cases;

import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.compat.common.*;
import org.junit.Assert;

import java.io.IOException;
import java.lang.reflect.Constructor;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

@HdfsCompatCaseGroup(name = "Server")
public class HdfsCompatServer extends AbstractHdfsCompatCase {
  private void isValid(String name) {
    Assert.assertNotNull(name);
    Assert.assertFalse(name.isEmpty());
  }

  @HdfsCompatCase
  public void initialize() throws Exception {
    Class<? extends FileSystem> cls = FileSystem.getFileSystemClass(
        getBasePath().toUri().getScheme(), fs().getConf());
    Constructor<? extends FileSystem> ctor =
        cls.getDeclaredConstructor();
    ctor.setAccessible(true);
    FileSystem newFs = ctor.newInstance();
    newFs.initialize(fs().getUri(), fs().getConf());
  }

  @HdfsCompatCase
  public void getScheme() {
    final String scheme = fs().getScheme();
    isValid(scheme);
  }

  @HdfsCompatCase
  public void getUri() {
    URI uri = fs().getUri();
    isValid(uri.getScheme());
  }

  @HdfsCompatCase
  public void getCanonicalServiceName() {
    final String serviceName = fs().getCanonicalServiceName();
    isValid(serviceName);
  }

  @HdfsCompatCase
  public void getName() {
    final String name = fs().getName();
    isValid(name);
  }

  @HdfsCompatCase
  public void makeQualified() {
    Path path = fs().makeQualified(makePath("file"));
    isValid(path.toUri().getScheme());
  }

  @HdfsCompatCase
  public void getChildFileSystems() {
    fs().getChildFileSystems();
  }

  @HdfsCompatCase
  public void resolvePath() throws IOException {
    FileSystem.enableSymlinks();
    Path file = makePath("file");
    Path link = new Path(file.toString() + "_link");
    HdfsCompatUtil.createFile(fs(), file, 0);
    fs().createSymlink(file, link, true);
    Path resolved = fs().resolvePath(link);
    Assert.assertEquals(file.getName(), resolved.getName());
  }

  @HdfsCompatCase
  public void getHomeDirectory() {
    final Path home = fs().getHomeDirectory();
    isValid(home.toString());
  }

  @HdfsCompatCase
  public void setWorkingDirectory() throws IOException {
    FileSystem another = FileSystem.newInstance(fs().getUri(), fs().getConf());
    Path work = makePath("work");
    another.setWorkingDirectory(work);
    Assert.assertEquals(work.getName(),
        another.getWorkingDirectory().getName());
  }

  @HdfsCompatCase
  public void getWorkingDirectory() {
    Path work = fs().getWorkingDirectory();
    isValid(work.toString());
  }

  @HdfsCompatCase
  public void close() throws IOException {
    FileSystem another = FileSystem.newInstance(fs().getUri(), fs().getConf());
    another.close();
  }

  @HdfsCompatCase
  public void getDefaultBlockSize() {
    Assert.assertTrue(fs().getDefaultBlockSize(getBasePath()) >= 0);
  }

  @HdfsCompatCase
  public void getDefaultReplication() {
    Assert.assertTrue(fs().getDefaultReplication(getBasePath()) >= 0);
  }

  @HdfsCompatCase
  public void getStorageStatistics() {
    Assert.assertNotNull(fs().getStorageStatistics());
  }

  // @HdfsCompatCase
  public void setVerifyChecksum() {
  }

  // @HdfsCompatCase
  public void setWriteChecksum() {
  }

  @HdfsCompatCase
  public void getDelegationToken() throws IOException {
    Assert.assertNotNull(fs().getDelegationToken(getDelegationTokenRenewer()));
  }

  @HdfsCompatCase
  public void getAdditionalTokenIssuers() throws IOException {
    Assert.assertNotNull(fs().getAdditionalTokenIssuers());
  }

  @HdfsCompatCase
  public void getServerDefaults() throws IOException {
    FsServerDefaults d = fs().getServerDefaults(getBasePath());
    Assert.assertTrue(d.getBlockSize() >= 0);
  }

  @HdfsCompatCase
  public void msync() throws IOException {
    fs().msync();
  }

  @HdfsCompatCase
  public void getStatus() throws IOException {
    FsStatus status = fs().getStatus();
    Assert.assertTrue(status.getRemaining() > 0);
  }

  @HdfsCompatCase
  public void getTrashRoot() {
    Path trash = fs().getTrashRoot(makePath("file"));
    isValid(trash.toString());
  }

  @HdfsCompatCase
  public void getTrashRoots() {
    Collection<FileStatus> trashes = fs().getTrashRoots(true);
    Assert.assertNotNull(trashes);
    for (FileStatus trash : trashes) {
      isValid(trash.getPath().toString());
    }
  }

  @HdfsCompatCase
  public void getAllStoragePolicies() throws IOException {
    Collection<? extends BlockStoragePolicySpi> policies =
        fs().getAllStoragePolicies();
    Assert.assertFalse(policies.isEmpty());
  }

  @HdfsCompatCase
  public void supportsSymlinks() {
    Assert.assertTrue(fs().supportsSymlinks());
  }

  @HdfsCompatCase
  public void hasPathCapability() throws IOException {
    List<String> allCaps = new ArrayList<>();
    allCaps.add(CommonPathCapabilities.FS_ACLS);
    allCaps.add(CommonPathCapabilities.FS_APPEND);
    allCaps.add(CommonPathCapabilities.FS_CHECKSUMS);
    allCaps.add(CommonPathCapabilities.FS_CONCAT);
    allCaps.add(CommonPathCapabilities.FS_LIST_CORRUPT_FILE_BLOCKS);
    allCaps.add(CommonPathCapabilities.FS_PATHHANDLES);
    allCaps.add(CommonPathCapabilities.FS_PERMISSIONS);
    allCaps.add(CommonPathCapabilities.FS_READ_ONLY_CONNECTOR);
    allCaps.add(CommonPathCapabilities.FS_SNAPSHOTS);
    allCaps.add(CommonPathCapabilities.FS_STORAGEPOLICY);
    allCaps.add(CommonPathCapabilities.FS_SYMLINKS);
    allCaps.add(CommonPathCapabilities.FS_TRUNCATE);
    allCaps.add(CommonPathCapabilities.FS_XATTRS);
    final Path base = getBasePath();
    for (String cap : allCaps) {
      if (fs().hasPathCapability(base, cap)) {
        return;
      }
    }
    throw new IOException("Cannot find any path capability");
  }
}
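hasPathCapability() is the probe callers are expected to use before invoking an optional API, which the final case above verifies across the common capabilities; a typical guard looks like this (sketch, with fs and base assumed):

    if (fs.hasPathCapability(base, CommonPathCapabilities.FS_XATTRS)) {
      fs.setXAttr(base, "user.probe", new byte[0]);
    }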
@@ -0,0 +1,137 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.cases;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.compat.common.*;
import org.junit.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

@HdfsCompatCaseGroup(name = "Snapshot")
public class HdfsCompatSnapshot extends AbstractHdfsCompatCase {
  private static final Logger LOG = LoggerFactory.getLogger(HdfsCompatSnapshot.class);
  private final String snapshotName = "s-name";
  private final String fileName = "file";
  private Path base;
  private Path dir;
  private Path snapshot;
  private Method allow;
  private Method disallow;

  private static Path getSnapshotPath(Path path, String snapshotName) {
    return new Path(path, ".snapshot/" + snapshotName);
  }

  @HdfsCompatCaseSetUp
  public void setUp() throws Exception {
    this.base = getUniquePath();
    fs().mkdirs(this.base);
    try {
      Method allowSnapshotMethod = fs().getClass()
          .getMethod("allowSnapshot", Path.class);
      allowSnapshotMethod.setAccessible(true);
      allowSnapshotMethod.invoke(fs(), this.base);
      this.allow = allowSnapshotMethod;

      Method disallowSnapshotMethod = fs().getClass()
          .getMethod("disallowSnapshot", Path.class);
      disallowSnapshotMethod.setAccessible(true);
      disallowSnapshotMethod.invoke(fs(), this.base);
      this.disallow = disallowSnapshotMethod;
    } catch (InvocationTargetException e) {
      // Method exists but the invocation throws an exception.
      Throwable cause = e.getCause();
      if (cause instanceof Exception) {
        throw (Exception) cause;
      } else {
        throw new RuntimeException(cause);
      }
    } catch (ReflectiveOperationException e) {
      if (this.allow == null) {
        LOG.warn("No allowSnapshot method found.");
      }
      if (this.disallow == null) {
        LOG.warn("No disallowSnapshot method found.");
      }
    }
  }

  @HdfsCompatCaseTearDown
  public void tearDown() throws ReflectiveOperationException {
    try {
      if (this.disallow != null) {
        disallow.invoke(fs(), this.base);
      }
    } finally {
      HdfsCompatUtil.deleteQuietly(fs(), this.base, true);
    }
  }

  @HdfsCompatCasePrepare
  public void prepare() throws IOException, ReflectiveOperationException {
    this.dir = getUniquePath(base);
    HdfsCompatUtil.createFile(fs(), new Path(this.dir, this.fileName), 0);
    if (this.allow != null) {
      allow.invoke(fs(), this.dir);
    }
    this.snapshot = fs().createSnapshot(this.dir, this.snapshotName);
  }

  @HdfsCompatCaseCleanup
  public void cleanup() throws ReflectiveOperationException {
    try {
      try {
        fs().deleteSnapshot(this.dir, this.snapshotName);
      } catch (IOException ignored) {
      }
      if (this.disallow != null) {
        disallow.invoke(fs(), this.dir);
      }
    } finally {
      HdfsCompatUtil.deleteQuietly(fs(), this.dir, true);
    }
  }

  @HdfsCompatCase
  public void createSnapshot() throws IOException {
    Assert.assertNotEquals(snapshot.toString(), dir.toString());
    Assert.assertTrue(fs().exists(snapshot));
    Assert.assertTrue(fs().exists(new Path(snapshot, fileName)));
  }

  @HdfsCompatCase
  public void renameSnapshot() throws IOException {
    fs().renameSnapshot(dir, snapshotName, "s-name2");
    Assert.assertFalse(fs().exists(new Path(snapshot, fileName)));
    snapshot = getSnapshotPath(dir, "s-name2");
    Assert.assertTrue(fs().exists(new Path(snapshot, fileName)));
    fs().renameSnapshot(dir, "s-name2", snapshotName);
  }

  @HdfsCompatCase
  public void deleteSnapshot() throws IOException {
    fs().deleteSnapshot(dir, snapshotName);
    Assert.assertFalse(fs().exists(snapshot));
    Assert.assertFalse(fs().exists(new Path(snapshot, fileName)));
  }
}
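allowSnapshot/disallowSnapshot are not part of the FileSystem base class, which is why the setup above resolves them reflectively and degrades gracefully when they are absent. Against HDFS itself the calls are direct; a sketch assuming the target is a DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem):

    if (fs instanceof DistributedFileSystem) {
      ((DistributedFileSystem) fs).allowSnapshot(dir);  // admin-only on HDFS
    }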
@@ -0,0 +1,106 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.cases;

import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.compat.common.*;
import org.junit.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;

@HdfsCompatCaseGroup(name = "StoragePolicy")
public class HdfsCompatStoragePolicy extends AbstractHdfsCompatCase {
  private static final Logger LOG =
      LoggerFactory.getLogger(HdfsCompatStoragePolicy.class);
  private static final Random RANDOM = new Random();
  private Path dir;
  private Path file;
  private String[] policies;
  private String defaultPolicyName;
  private String policyName;

  @HdfsCompatCaseSetUp
  public void setUp() throws IOException {
    policies = getStoragePolicyNames();
  }

  @HdfsCompatCasePrepare
  public void prepare() throws IOException {
    this.dir = makePath("dir");
    this.file = new Path(this.dir, "file");
    HdfsCompatUtil.createFile(fs(), file, 0);

    BlockStoragePolicySpi policy = fs().getStoragePolicy(this.dir);
    this.defaultPolicyName = (policy == null) ? null : policy.getName();

    List<String> differentPolicies = new ArrayList<>();
    for (String name : policies) {
      if (!name.equalsIgnoreCase(defaultPolicyName)) {
        differentPolicies.add(name);
      }
    }
    if (differentPolicies.isEmpty()) {
      LOG.warn("There is only one storage policy: " +
          (defaultPolicyName == null ? "null" : defaultPolicyName));
      this.policyName = defaultPolicyName;
    } else {
      this.policyName = differentPolicies.get(
          RANDOM.nextInt(differentPolicies.size()));
    }
  }

  @HdfsCompatCaseCleanup
  public void cleanup() {
    HdfsCompatUtil.deleteQuietly(fs(), this.dir, true);
  }

  @HdfsCompatCase
  public void setStoragePolicy() throws IOException {
    fs().setStoragePolicy(dir, policyName);
    BlockStoragePolicySpi policy = fs().getStoragePolicy(dir);
    Assert.assertEquals(policyName, policy.getName());
  }

  @HdfsCompatCase
  public void unsetStoragePolicy() throws IOException {
    fs().setStoragePolicy(dir, policyName);
    fs().unsetStoragePolicy(dir);
    BlockStoragePolicySpi policy = fs().getStoragePolicy(dir);
    String policyNameAfterUnset = (policy == null) ? null : policy.getName();
    Assert.assertEquals(defaultPolicyName, policyNameAfterUnset);
  }

  @HdfsCompatCase(ifDef = "org.apache.hadoop.fs.FileSystem#satisfyStoragePolicy")
  public void satisfyStoragePolicy() throws IOException {
    fs().setStoragePolicy(dir, policyName);
    fs().satisfyStoragePolicy(dir);
  }

  @HdfsCompatCase
  public void getStoragePolicy() throws IOException {
    BlockStoragePolicySpi policy = fs().getStoragePolicy(file);
    String initialPolicyName = (policy == null) ? null : policy.getName();
    Assert.assertEquals(defaultPolicyName, initialPolicyName);
  }
}
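The prepare() step above picks a policy name that differs from the directory's current one so that setStoragePolicy() is observable. The same discovery works with only public FileSystem API; a sketch (fs, dir and currentName assumed):

    for (BlockStoragePolicySpi p : fs.getAllStoragePolicies()) {
      if (!p.getName().equalsIgnoreCase(currentName)) {
        fs.setStoragePolicy(dir, p.getName());
        break;
      }
    }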
@@ -0,0 +1,70 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.cases;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.compat.common.*;
import org.junit.Assert;

import java.io.IOException;

@HdfsCompatCaseGroup(name = "Symlink")
public class HdfsCompatSymlink extends AbstractHdfsCompatCase {
  private static final int FILE_LEN = 128;
  private Path target = null;
  private Path link = null;

  @HdfsCompatCaseSetUp
  public void setUp() {
    FileSystem.enableSymlinks();
  }

  @HdfsCompatCasePrepare
  public void prepare() throws IOException {
    this.target = makePath("target");
    this.link = new Path(this.target.getParent(), "link");
    HdfsCompatUtil.createFile(fs(), this.target, FILE_LEN);
    fs().createSymlink(this.target, this.link, true);
  }

  @HdfsCompatCaseCleanup
  public void cleanup() throws IOException {
    HdfsCompatUtil.deleteQuietly(fs(), this.link, true);
    HdfsCompatUtil.deleteQuietly(fs(), this.target, true);
  }

  @HdfsCompatCase
  public void createSymlink() throws IOException {
    Assert.assertTrue(fs().exists(link));
  }

  @HdfsCompatCase
  public void getFileLinkStatus() throws IOException {
    FileStatus linkStatus = fs().getFileLinkStatus(link);
    Assert.assertTrue(linkStatus.isSymlink());
    Assert.assertEquals(target.getName(), linkStatus.getSymlink().getName());
  }

  @HdfsCompatCase
  public void getLinkTarget() throws IOException {
    Path src = fs().getLinkTarget(link);
    Assert.assertEquals(target.getName(), src.getName());
  }
}
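The assertions above hinge on the difference between the resolving and non-resolving status calls; spelled out (a sketch, with fs and link assumed):

    FileStatus resolved = fs.getFileStatus(link);   // follows the link: describes the target
    FileStatus raw = fs.getFileLinkStatus(link);    // does not: describes the link itself
    // resolved.isSymlink() is false, raw.isSymlink() is true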
@@ -0,0 +1,121 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.cases;

import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.compat.common.*;
import org.junit.Assert;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;

@HdfsCompatCaseGroup(name = "TPCDS")
public class HdfsCompatTpcds extends AbstractHdfsCompatCase {
  private static final int FILE_LEN = 8;
  private static final Random RANDOM = new Random();
  private Path path = null;

  @HdfsCompatCasePrepare
  public void prepare() throws IOException {
    path = makePath("path");
  }

  @HdfsCompatCaseCleanup
  public void cleanup() throws IOException {
    HdfsCompatUtil.deleteQuietly(fs(), path, true);
  }

  @HdfsCompatCase
  public void open() throws IOException {
    HdfsCompatUtil.createFile(fs(), path, FILE_LEN);
    byte[] data = new byte[FILE_LEN];
    try (FSDataInputStream in = fs().open(path)) {
      in.readFully(data);
    }
  }

  @HdfsCompatCase
  public void create() throws IOException {
    byte[] data = new byte[FILE_LEN];
    RANDOM.nextBytes(data);
    try (FSDataOutputStream out = fs().create(path, true)) {
      out.write(data);
    }
  }

  @HdfsCompatCase
  public void mkdirs() throws IOException {
    Assert.assertTrue(fs().mkdirs(path));
  }

  @HdfsCompatCase
  public void getFileStatus() throws IOException {
    HdfsCompatUtil.createFile(fs(), path, FILE_LEN);
    FileStatus fileStatus = fs().getFileStatus(path);
    Assert.assertEquals(FILE_LEN, fileStatus.getLen());
  }

  @HdfsCompatCase
  public void listStatus() throws IOException {
    HdfsCompatUtil.createFile(fs(), new Path(path, "file"), FILE_LEN);
    FileStatus[] files = fs().listStatus(path);
    Assert.assertEquals(1, files.length);
    Assert.assertEquals(FILE_LEN, files[0].getLen());
  }

  @HdfsCompatCase
  public void listLocatedStatus() throws IOException {
    HdfsCompatUtil.createFile(fs(), new Path(path, "file"), FILE_LEN);
    RemoteIterator<LocatedFileStatus> it = fs().listLocatedStatus(path);
    List<LocatedFileStatus> files = new ArrayList<>();
    while (it.hasNext()) {
      files.add(it.next());
    }
    Assert.assertEquals(1, files.size());
    Assert.assertEquals(FILE_LEN, files.get(0).getLen());
  }

  @HdfsCompatCase
  public void rename() throws IOException {
    HdfsCompatUtil.createFile(fs(), new Path(path, "file"), FILE_LEN);
    fs().rename(path, new Path(path.getParent(), path.getName() + "_dst"));
  }

  @HdfsCompatCase
  public void delete() throws IOException {
    HdfsCompatUtil.createFile(fs(), new Path(path, "file"), FILE_LEN);
    fs().delete(path, true);
  }

  @HdfsCompatCase
  public void getServerDefaults() throws IOException {
    Assert.assertNotNull(fs().getServerDefaults(path));
  }

  @HdfsCompatCase
  public void getTrashRoot() throws IOException {
    Assert.assertNotNull(fs().getTrashRoot(path));
  }

  @HdfsCompatCase
  public void makeQualified() throws IOException {
    Assert.assertNotNull(fs().makeQualified(path));
  }
}
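These cases exercise the minimal call set a TPC-DS-style workload needs, but they stop at existence and length checks. A fuller round trip that also verifies content (illustrative sketch; fs and path assumed):

    byte[] data = new byte[8];
    new Random().nextBytes(data);
    try (FSDataOutputStream out = fs.create(path, true)) {
      out.write(data);
    }
    byte[] back = new byte[data.length];
    try (FSDataInputStream in = fs.open(path)) {
      in.readFully(back);
    }
    Assert.assertArrayEquals(data, back);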
@@ -0,0 +1,100 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.cases;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.compat.common.*;
import org.junit.Assert;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

@HdfsCompatCaseGroup(name = "XAttr")
public class HdfsCompatXAttr extends AbstractHdfsCompatCase {
  private Path file;

  @HdfsCompatCasePrepare
  public void prepare() throws IOException {
    this.file = makePath("file");
    HdfsCompatUtil.createFile(fs(), this.file, 0);
  }

  @HdfsCompatCaseCleanup
  public void cleanup() {
    HdfsCompatUtil.deleteQuietly(fs(), this.file, true);
  }

  @HdfsCompatCase
  public void setXAttr() throws IOException {
    final String key = "user.key";
    final byte[] value = "value".getBytes(StandardCharsets.UTF_8);
    fs().setXAttr(file, key, value);
    Map<String, byte[]> attrs = fs().getXAttrs(file);
    Assert.assertArrayEquals(value, attrs.getOrDefault(key, new byte[0]));
  }

  @HdfsCompatCase
  public void getXAttr() throws IOException {
    final String key = "user.key";
    final byte[] value = "value".getBytes(StandardCharsets.UTF_8);
    fs().setXAttr(file, key, value);
    byte[] attr = fs().getXAttr(file, key);
    Assert.assertArrayEquals(value, attr);
  }

  @HdfsCompatCase
  public void getXAttrs() throws IOException {
    fs().setXAttr(file, "user.key1",
        "value1".getBytes(StandardCharsets.UTF_8));
    fs().setXAttr(file, "user.key2",
        "value2".getBytes(StandardCharsets.UTF_8));
    List<String> keys = new ArrayList<>();
    keys.add("user.key1");
    Map<String, byte[]> attrs = fs().getXAttrs(file, keys);
    Assert.assertEquals(1, attrs.size());
    byte[] attr = attrs.getOrDefault("user.key1", new byte[0]);
    Assert.assertArrayEquals("value1".getBytes(StandardCharsets.UTF_8), attr);
  }

  @HdfsCompatCase
  public void listXAttrs() throws IOException {
    fs().setXAttr(file, "user.key1",
        "value1".getBytes(StandardCharsets.UTF_8));
    fs().setXAttr(file, "user.key2",
        "value2".getBytes(StandardCharsets.UTF_8));
    List<String> names = fs().listXAttrs(file);
    Assert.assertEquals(2, names.size());
    Assert.assertTrue(names.contains("user.key1"));
    Assert.assertTrue(names.contains("user.key2"));
  }

  @HdfsCompatCase
  public void removeXAttr() throws IOException {
    fs().setXAttr(file, "user.key1",
        "value1".getBytes(StandardCharsets.UTF_8));
    fs().setXAttr(file, "user.key2",
        "value2".getBytes(StandardCharsets.UTF_8));
    fs().removeXAttr(file, "user.key1");
    List<String> names = fs().listXAttrs(file);
    Assert.assertEquals(1, names.size());
    Assert.assertTrue(names.contains("user.key2"));
  }
}
@ -0,0 +1,23 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * This package contains the default compatibility cases for
 * {@link org.apache.hadoop.fs.FileSystem} APIs.
 */
package org.apache.hadoop.fs.compat.cases;
@ -0,0 +1,84 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.common;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

import java.util.Random;
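/**
 * Base class for a group of compatibility cases. Subclasses obtain the
 * target {@link FileSystem} and the benchmark paths from the shared
 * {@link HdfsCompatEnvironment} passed to {@link #init(HdfsCompatEnvironment)}.
 */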
public abstract class AbstractHdfsCompatCase {
  private static final Random RANDOM = new Random();

  private FileSystem fs;
  private HdfsCompatEnvironment env;
  private Path localPath;

  public AbstractHdfsCompatCase() {
  }

  public void init(HdfsCompatEnvironment environment) {
    this.env = environment;
    this.fs = env.getFileSystem();
    LocalFileSystem localFs = env.getLocalFileSystem();
    this.localPath = localFs.makeQualified(new Path(env.getLocalTmpDir()));
  }

  public FileSystem fs() {
    return fs;
  }

  public Path getRootPath() {
    return this.env.getRoot();
  }

  public Path getBasePath() {
    return this.env.getBase();
  }

  public Path getUniquePath() {
    return getUniquePath(getBasePath());
  }

  public static Path getUniquePath(Path basePath) {
    return new Path(basePath, System.currentTimeMillis()
        + "_" + RANDOM.nextLong());
  }

  public Path makePath(String name) {
    return new Path(getUniquePath(), name);
  }

  public Path getLocalPath() {
    return localPath;
  }

  public String getPrivilegedUser() {
    return this.env.getPrivilegedUser();
  }

  public String[] getStoragePolicyNames() {
    return this.env.getStoragePolicyNames();
  }

  public String getDelegationTokenRenewer() {
    return this.env.getDelegationTokenRenewer();
  }
}
@ -0,0 +1,358 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.common;

import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.fs.compat.HdfsCompatTool;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
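/**
 * API scope of a suite: instantiates each case class, drives the
 * SetUp/Prepare/Case/Cleanup/TearDown lifecycle reflectively, and collects
 * the per-case results into an {@link HdfsCompatReport}.
 */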
public class HdfsCompatApiScope {
  static final boolean SKIP_NO_SUCH_METHOD_ERROR = true;
  private static final Logger LOG =
      LoggerFactory.getLogger(HdfsCompatApiScope.class);

  private final HdfsCompatEnvironment env;
  private final HdfsCompatSuite suite;

  public HdfsCompatApiScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) {
    this.env = env;
    this.suite = suite;
  }

  public HdfsCompatReport apply() {
    List<GroupedCase> groups = collectGroup();
    HdfsCompatReport report = new HdfsCompatReport();
    for (GroupedCase group : groups) {
      if (group.methods.isEmpty()) {
        continue;
      }
      final AbstractHdfsCompatCase obj = group.obj;
      GroupedResult groupedResult = new GroupedResult(obj, group.methods);

      // SetUp
      groupedResult.setUp = test(group.setUp, obj);

      if (groupedResult.setUp == Result.OK) {
        for (Method method : group.methods) {
          CaseResult caseResult = new CaseResult();

          // Prepare
          caseResult.prepareResult = test(group.prepare, obj);

          if (caseResult.prepareResult == Result.OK) { // Case
            caseResult.methodResult = test(method, obj);
          }

          // Cleanup
          caseResult.cleanupResult = test(group.cleanup, obj);

          groupedResult.results.put(getCaseName(method), caseResult);
        }
      }

      // TearDown
      groupedResult.tearDown = test(group.tearDown, obj);

      groupedResult.exportTo(report);
    }
    return report;
  }
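  /**
   * Invokes one lifecycle or case method reflectively. A null method is
   * treated as OK; a NoSuchMethodError thrown by the target maps to SKIP
   * when SKIP_NO_SUCH_METHOD_ERROR is set (the file system does not provide
   * the API); any other failure maps to ERROR.
   */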
  private Result test(Method method, AbstractHdfsCompatCase obj) {
    if (method == null) { // Empty method, just OK.
      return Result.OK;
    }
    try {
      method.invoke(obj);
      return Result.OK;
    } catch (InvocationTargetException t) {
      Throwable e = t.getCause();
      if (SKIP_NO_SUCH_METHOD_ERROR && (e instanceof NoSuchMethodError)) {
        LOG.warn("Case skipped with method " + method.getName()
            + " of class " + obj.getClass(), e);
        return Result.SKIP;
      } else {
        LOG.warn("Case failed with method " + method.getName()
            + " of class " + obj.getClass(), e);
        return Result.ERROR;
      }
    } catch (ReflectiveOperationException e) {
      LOG.error("Illegal Compatibility Case method " + method.getName()
          + " of class " + obj.getClass(), e);
      throw new HdfsCompatIllegalCaseException(e.getMessage());
    }
  }

  private List<GroupedCase> collectGroup() {
    Class<? extends AbstractHdfsCompatCase>[] cases = suite.getApiCases();
    List<GroupedCase> groups = new ArrayList<>();
    for (Class<? extends AbstractHdfsCompatCase> cls : cases) {
      try {
        groups.add(GroupedCase.parse(cls, this.env));
      } catch (ReflectiveOperationException e) {
        LOG.error("Illegal Compatibility Group " + cls.getName(), e);
        throw new HdfsCompatIllegalCaseException(e.getMessage());
      }
    }
    return groups;
  }

  private static String getCaseName(Method caseMethod) {
    HdfsCompatCase annotation = caseMethod.getAnnotation(HdfsCompatCase.class);
    assert (annotation != null);
    if (annotation.brief().isEmpty()) {
      return caseMethod.getName();
    } else {
      return caseMethod.getName() + " (" + annotation.brief() + ")";
    }
  }

  @VisibleForTesting
  public static Set<String> getPublicInterfaces(Class<?> cls) {
    Method[] methods = cls.getDeclaredMethods();
    Set<String> publicMethodNames = new HashSet<>();
    for (Method method : methods) {
      int modifiers = method.getModifiers();
      if (Modifier.isPublic(modifiers) && !Modifier.isStatic(modifiers)) {
        publicMethodNames.add(method.getName());
      }
    }
    publicMethodNames.remove(cls.getSimpleName());
    publicMethodNames.remove("toString");
    return publicMethodNames;
  }

  private static final class GroupedCase {
    private static final Map<String, Set<String>> DEFINED_METHODS =
        new HashMap<>();
    private final AbstractHdfsCompatCase obj;
    private final List<Method> methods;
    private final Method setUp;
    private final Method tearDown;
    private final Method prepare;
    private final Method cleanup;

    private GroupedCase(AbstractHdfsCompatCase obj, List<Method> methods,
                        Method setUp, Method tearDown,
                        Method prepare, Method cleanup) {
      this.obj = obj;
      this.methods = methods;
      this.setUp = setUp;
      this.tearDown = tearDown;
      this.prepare = prepare;
      this.cleanup = cleanup;
    }

    private static GroupedCase parse(Class<? extends AbstractHdfsCompatCase> cls,
                                     HdfsCompatEnvironment env)
        throws ReflectiveOperationException {
      Constructor<? extends AbstractHdfsCompatCase> ctor = cls.getConstructor();
      ctor.setAccessible(true);
      AbstractHdfsCompatCase caseObj = ctor.newInstance();
      caseObj.init(env);
      Method[] declaredMethods = caseObj.getClass().getDeclaredMethods();
      List<Method> caseMethods = new ArrayList<>();
      Method setUp = null;
      Method tearDown = null;
      Method prepare = null;
      Method cleanup = null;
      for (Method method : declaredMethods) {
        if (method.isAnnotationPresent(HdfsCompatCase.class)) {
          if (method.isAnnotationPresent(HdfsCompatCaseSetUp.class) ||
              method.isAnnotationPresent(HdfsCompatCaseTearDown.class) ||
              method.isAnnotationPresent(HdfsCompatCasePrepare.class) ||
              method.isAnnotationPresent(HdfsCompatCaseCleanup.class)) {
            throw new HdfsCompatIllegalCaseException(
                "Compatibility Case must not be annotated by" +
                    " Prepare/Cleanup or SetUp/TearDown");
          }
          HdfsCompatCase annotation = method.getAnnotation(HdfsCompatCase.class);
          if (annotation.ifDef().isEmpty()) {
            caseMethods.add(method);
          } else {
            String[] requireDefined = annotation.ifDef().split(",");
            if (Arrays.stream(requireDefined).allMatch(GroupedCase::checkDefined)) {
              caseMethods.add(method);
            }
          }
        } else {
          if (method.isAnnotationPresent(HdfsCompatCaseSetUp.class)) {
            if (setUp != null) {
              throw new HdfsCompatIllegalCaseException(
                  "Duplicate SetUp method in Compatibility Case");
            }
            setUp = method;
          }
          if (method.isAnnotationPresent(HdfsCompatCaseTearDown.class)) {
            if (tearDown != null) {
              throw new HdfsCompatIllegalCaseException(
                  "Duplicate TearDown method in Compatibility Case");
            }
            tearDown = method;
          }
          if (method.isAnnotationPresent(HdfsCompatCasePrepare.class)) {
            if (prepare != null) {
              throw new HdfsCompatIllegalCaseException(
                  "Duplicate Prepare method in Compatibility Case");
            }
            prepare = method;
          }
          if (method.isAnnotationPresent(HdfsCompatCaseCleanup.class)) {
            if (cleanup != null) {
              throw new HdfsCompatIllegalCaseException(
                  "Duplicate Cleanup method in Compatibility Case");
            }
            cleanup = method;
          }
        }
      }
      return new GroupedCase(caseObj, caseMethods,
          setUp, tearDown, prepare, cleanup);
    }
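    /**
     * An ifDef entry has the form {@code className#methodName}; it holds
     * when the named class declares a public non-static method with that
     * name. Lookups are cached per class in DEFINED_METHODS.
     */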
    private static synchronized boolean checkDefined(String ifDef) {
      String[] classAndMethod = ifDef.split("#", 2);
      if (classAndMethod.length < 2) {
        throw new HdfsCompatIllegalCaseException(
            "ifDef must be with format className#methodName");
      }
      final String className = classAndMethod[0];
      final String methodName = classAndMethod[1];
      Set<String> methods = DEFINED_METHODS.getOrDefault(className, null);
      if (methods != null) {
        return methods.contains(methodName);
      }
      Class<?> cls;
      try {
        cls = Class.forName(className);
      } catch (ClassNotFoundException e) {
        throw new HdfsCompatIllegalCaseException(e.getMessage());
      }
      methods = getPublicInterfaces(cls);
      DEFINED_METHODS.put(className, methods);
      return methods.contains(methodName);
    }
  }

  private static final class GroupedResult {
    private static final int COMMON_PREFIX_LEN = HdfsCompatTool.class
        .getPackage().getName().length() + ".cases.".length();
    private final String prefix;
    private Result setUp;
    private Result tearDown;
    private final LinkedHashMap<String, CaseResult> results;

    private GroupedResult(AbstractHdfsCompatCase obj, List<Method> methods) {
      this.prefix = getNamePrefix(obj.getClass());
      this.results = new LinkedHashMap<>();
      for (Method method : methods) {
        this.results.put(getCaseName(method), new CaseResult());
      }
    }

    private void exportTo(HdfsCompatReport report) {
      if (this.setUp == Result.SKIP) {
        List<String> cases = results.keySet().stream().map(m -> prefix + m)
            .collect(Collectors.toList());
        report.addSkippedCase(cases);
        return;
      }
      if ((this.setUp == Result.ERROR) || (this.tearDown == Result.ERROR)) {
        List<String> cases = results.keySet().stream().map(m -> prefix + m)
            .collect(Collectors.toList());
        report.addFailedCase(cases);
        return;
      }

      List<String> passed = new ArrayList<>();
      List<String> failed = new ArrayList<>();
      List<String> skipped = new ArrayList<>();
      for (Map.Entry<String, CaseResult> entry : results.entrySet()) {
        final String caseName = prefix + entry.getKey();
        CaseResult result = entry.getValue();
        if (result.prepareResult == Result.SKIP) {
          skipped.add(caseName);
          continue;
        }
        if ((result.prepareResult == Result.ERROR) ||
            (result.cleanupResult == Result.ERROR) ||
            (result.methodResult == Result.ERROR)) {
          failed.add(caseName);
        } else if (result.methodResult == Result.OK) {
          passed.add(caseName);
        } else {
          skipped.add(caseName);
        }
      }

      if (!passed.isEmpty()) {
        report.addPassedCase(passed);
      }
      if (!failed.isEmpty()) {
        report.addFailedCase(failed);
      }
      if (!skipped.isEmpty()) {
        report.addSkippedCase(skipped);
      }
    }

    private static String getNamePrefix(Class<? extends AbstractHdfsCompatCase> cls) {
      return (cls.getPackage().getName() + ".").substring(COMMON_PREFIX_LEN) +
          getGroupName(cls) + ".";
    }

    private static String getGroupName(Class<? extends AbstractHdfsCompatCase> cls) {
      if (cls.isAnnotationPresent(HdfsCompatCaseGroup.class)) {
        HdfsCompatCaseGroup annotation = cls.getAnnotation(HdfsCompatCaseGroup.class);
        if (!annotation.name().isEmpty()) {
          return annotation.name();
        }
      }
      return cls.getSimpleName();
    }
  }

  private static class CaseResult {
    private Result prepareResult = Result.SKIP;
    private Result cleanupResult = Result.SKIP;
    private Result methodResult = Result.SKIP;
  }

  private enum Result {
    OK,
    ERROR,
    SKIP,
  }
}
@ -0,0 +1,31 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.common;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
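/**
 * Marks a method as a single compatibility case. The optional {@code brief}
 * description is appended to the case name in the report; {@code ifDef}
 * restricts the case to file systems that declare the given public methods,
 * as a comma-separated list of {@code className#methodName} entries.
 */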
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.METHOD})
public @interface HdfsCompatCase {
  String brief() default "";

  String ifDef() default "";
}
@ -0,0 +1,28 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.common;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
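/**
 * Marks the method that cleans up after each case of a group; it runs once
 * per case, after the case method, whether or not the case succeeded.
 */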
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.METHOD})
public @interface HdfsCompatCaseCleanup {
}
@ -0,0 +1,29 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.common;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
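/**
 * Groups compatibility cases on a class; the optional {@code name} replaces
 * the simple class name in report entries.
 */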
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.TYPE})
public @interface HdfsCompatCaseGroup {
  String name() default "";
}
@ -0,0 +1,28 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.common;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
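/**
 * Marks the method that prepares state before each case of a group; the
 * case method only runs if preparation succeeds.
 */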
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.METHOD})
public @interface HdfsCompatCasePrepare {
}
@ -0,0 +1,28 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.common;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
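/**
 * Marks the method that runs once before all cases of a group. If it
 * errors, every case in the group is reported failed; if it is skipped,
 * the whole group is skipped.
 */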
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.METHOD})
public @interface HdfsCompatCaseSetUp {
}
@ -0,0 +1,28 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.common;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
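/**
 * Marks the method that runs once after all cases of a group have finished.
 */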
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.METHOD})
public @interface HdfsCompatCaseTearDown {
}
@ -0,0 +1,127 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.common;

import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.fs.compat.suites.HdfsCompatSuiteForAll;
import org.apache.hadoop.fs.compat.suites.HdfsCompatSuiteForShell;
import org.apache.hadoop.fs.compat.suites.HdfsCompatSuiteForTpcds;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

import java.io.IOException;
import java.lang.reflect.Constructor;
import java.util.HashMap;
import java.util.Map;
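/**
 * Entry point of a benchmark run: resolves the named suite (a built-in one
 * or a user class configured via
 * {@code hadoop.compatibility.suite.<name>.classname}), then applies the
 * suite's API and shell scopes against the target file system and merges
 * the resulting reports.
 */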
public class HdfsCompatCommand {
  private final Path uri;
  private final String suiteName;
  private final Configuration conf;
  private HdfsCompatSuite suite;
  private HdfsCompatApiScope api;
  private HdfsCompatShellScope shell;

  public HdfsCompatCommand(String uri, String suiteName, Configuration conf) {
    this.uri = new Path(uri);
    this.suiteName = suiteName.toLowerCase();
    this.conf = conf;
  }

  public void initialize() throws ReflectiveOperationException, IOException {
    initSuite();
    HdfsCompatEnvironment env = new HdfsCompatEnvironment(uri, conf);
    env.init();
    if (hasApiCase()) {
      api = new HdfsCompatApiScope(env, suite);
    }
    if (hasShellCase()) {
      shell = new HdfsCompatShellScope(env, suite);
    }
  }

  public HdfsCompatReport apply() throws Exception {
    HdfsCompatReport report = new HdfsCompatReport(uri.toString(), suite);
    if (api != null) {
      report.merge(api.apply());
    }
    if (shell != null) {
      report.merge(shell.apply());
    }
    return report;
  }

  private void initSuite() throws ReflectiveOperationException {
    Map<String, HdfsCompatSuite> defaultSuites = getDefaultSuites();
    this.suite = defaultSuites.getOrDefault(this.suiteName, null);
    if (this.suite != null) {
      return;
    }
    String key = "hadoop.compatibility.suite." + this.suiteName + ".classname";
    final String suiteClassName = conf.get(key, null);
    if ((suiteClassName == null) || suiteClassName.isEmpty()) {
      throw new HdfsCompatIllegalArgumentException(
          "cannot get class name for suite " + this.suiteName +
              ", configuration " + key + " is not properly set.");
    }
    // Load the configured class by name; calling getClass() on the name
    // String itself would only ever construct another String.
    Constructor<?> ctor = Class.forName(suiteClassName).getConstructor();
    ctor.setAccessible(true);
    Object suiteObj = ctor.newInstance();
    if (suiteObj instanceof HdfsCompatSuite) {
      this.suite = (HdfsCompatSuite) suiteObj;
    } else {
      throw new HdfsCompatIllegalArgumentException(
          "class name " + suiteClassName + " must be an" +
              " implementation of " + HdfsCompatSuite.class.getName());
    }
    if (suite.getSuiteName() == null || suite.getSuiteName().isEmpty()) {
      throw new HdfsCompatIllegalArgumentException(
          "suite " + suiteClassName + " suiteName is empty");
    }
    for (HdfsCompatSuite defaultSuite : defaultSuites.values()) {
      if (suite.getSuiteName().equalsIgnoreCase(defaultSuite.getSuiteName())) {
        throw new HdfsCompatIllegalArgumentException(
            "suite " + suiteClassName + " suiteName" +
                " conflicts with default suite " + defaultSuite.getSuiteName());
      }
    }
    if (!hasApiCase() && !hasShellCase()) {
      throw new HdfsCompatIllegalArgumentException(
          "suite " + suiteClassName + " is empty for both API and SHELL");
    }
  }

  private boolean hasApiCase() {
    return (suite.getApiCases() != null) &&
        (suite.getApiCases().length > 0);
  }

  private boolean hasShellCase() {
    return (suite.getShellCases() != null) &&
        (suite.getShellCases().length > 0);
  }

  @VisibleForTesting
  protected Map<String, HdfsCompatSuite> getDefaultSuites() {
    Map<String, HdfsCompatSuite> defaultSuites = new HashMap<>();
    defaultSuites.put("all", new HdfsCompatSuiteForAll());
    defaultSuites.put("shell", new HdfsCompatSuiteForShell());
    defaultSuites.put("tpcds", new HdfsCompatSuiteForTpcds());
    return defaultSuites;
  }
}
@ -0,0 +1,155 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.common;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.Collection;
import java.util.Date;
import java.util.List;
import java.util.Random;
import java.util.UUID;
import java.util.stream.Collectors;
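/**
 * Shared runtime context of a benchmark run: the target and local file
 * systems, a unique base directory for this run, and per-scheme options
 * (privileged user, storage policies, delegation token renewer) read from
 * configuration keys of the form {@code fs.<scheme>.compatibility.*}.
 */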
public class HdfsCompatEnvironment {
  private static final Logger LOG =
      LoggerFactory.getLogger(HdfsCompatEnvironment.class);
  private static final String DATE_FORMAT = "yyyy_MM_dd_HH_mm_ss";
  private static final Random RANDOM = new Random();
  private final Path uri;
  private final Configuration conf;
  private FileSystem fs;
  private LocalFileSystem localFs;
  private Path rootDir;
  private Path baseDir;
  private String defaultLocalDir;
  private String[] defaultStoragePolicyNames;

  public HdfsCompatEnvironment(Path uri, Configuration conf) {
    this.conf = conf;
    this.uri = uri;
  }

  public void init() throws IOException {
    Date now = new Date();
    String uuid = UUID.randomUUID().toString();
    String uniqueDir = "hadoop-compatibility-benchmark/" +
        new SimpleDateFormat(DATE_FORMAT).format(now) + "/" + uuid;

    this.fs = uri.getFileSystem(conf);
    this.localFs = FileSystem.getLocal(conf);
    this.rootDir = fs.makeQualified(new Path("/"));
    this.baseDir = fs.makeQualified(new Path(uri, uniqueDir));
    String tmpdir = getEnvTmpDir();
    if ((tmpdir == null) || tmpdir.isEmpty()) {
      LOG.warn("Cannot get valid io.tmpdir, will use /tmp");
      tmpdir = "/tmp";
    }
    this.defaultLocalDir = new File(tmpdir, uniqueDir).getAbsolutePath();
    this.defaultStoragePolicyNames = getDefaultStoragePolicyNames();
  }

  public FileSystem getFileSystem() {
    return fs;
  }

  public LocalFileSystem getLocalFileSystem() {
    return localFs;
  }

  public Path getRoot() {
    return rootDir;
  }

  public Path getBase() {
    return baseDir;
  }

  public String getLocalTmpDir() {
    final String scheme = this.uri.toUri().getScheme();
    final String key = "fs." + scheme + ".compatibility.local.tmpdir";
    final String localDir = conf.get(key, null);
    return (localDir != null) ? localDir : defaultLocalDir;
  }

  public String getPrivilegedUser() {
    final String scheme = this.uri.toUri().getScheme();
    final String key = "fs." + scheme + ".compatibility.privileged.user";
    final String privileged = conf.get(key, null);
    return (privileged != null) ? privileged :
        conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
            DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
  }

  public String[] getStoragePolicyNames() {
    final String scheme = this.uri.toUri().getScheme();
    final String key = "fs." + scheme + ".compatibility.storage.policies";
    final String storagePolicies = conf.get(key, null);
    return (storagePolicies != null) ? storagePolicies.split(",") :
        defaultStoragePolicyNames.clone();
  }

  public String getDelegationTokenRenewer() {
    final String scheme = this.uri.toUri().getScheme();
    final String key = "fs." + scheme + ".compatibility.delegation.token.renewer";
    return conf.get(key, "");
  }

  private String getEnvTmpDir() {
    final String systemDefault = System.getProperty("java.io.tmpdir");
    if ((systemDefault == null) || systemDefault.isEmpty()) {
      return null;
    }
    String[] tmpDirs = systemDefault.split(",|" + File.pathSeparator);
    List<String> validDirs = Arrays.stream(tmpDirs).filter(
        s -> (s != null && !s.isEmpty())
    ).collect(Collectors.toList());
    if (validDirs.isEmpty()) {
      return null;
    }
    final String tmpDir = validDirs.get(
        RANDOM.nextInt(validDirs.size()));
    return new File(tmpDir).getAbsolutePath();
  }

  private String[] getDefaultStoragePolicyNames() {
    Collection<? extends BlockStoragePolicySpi> policies = null;
    try {
      policies = fs.getAllStoragePolicies();
    } catch (Exception e) {
      LOG.warn("Cannot get storage policy", e);
    }
    if ((policies == null) || policies.isEmpty()) {
      return new String[]{"Hot"};
    } else {
      return policies.stream().map(BlockStoragePolicySpi::getName).toArray(String[]::new);
    }
  }
}
@ -0,0 +1,25 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.common;
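/**
 * Thrown when a command-line argument or suite configuration of the
 * benchmark is invalid.
 */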
public class HdfsCompatIllegalArgumentException
    extends IllegalArgumentException {
  public HdfsCompatIllegalArgumentException(String message) {
    super(message);
  }
}
@ -0,0 +1,31 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.common;

import org.apache.hadoop.HadoopIllegalArgumentException;
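/**
 * Thrown when a compatibility case class or method is malformed, for
 * example duplicate SetUp/TearDown methods or an invalid {@code ifDef}
 * expression.
 */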
public class HdfsCompatIllegalCaseException
    extends HadoopIllegalArgumentException {
  /**
   * Constructs exception with the specified detail message.
   * @param message detailed message.
   */
  public HdfsCompatIllegalCaseException(final String message) {
    super(message);
  }
}
@ -0,0 +1,79 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.common;

import java.util.Collection;
import java.util.concurrent.ConcurrentLinkedQueue;
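/**
 * Thread-safe collection of the passed, failed and skipped case names of
 * one benchmark run; reports from different scopes can be merged.
 */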
public class HdfsCompatReport {
  private final String uri;
  private final HdfsCompatSuite suite;
  private final ConcurrentLinkedQueue<String> passed =
      new ConcurrentLinkedQueue<>();
  private final ConcurrentLinkedQueue<String> failed =
      new ConcurrentLinkedQueue<>();
  private final ConcurrentLinkedQueue<String> skipped =
      new ConcurrentLinkedQueue<>();

  public HdfsCompatReport() {
    this(null, null);
  }

  public HdfsCompatReport(String uri, HdfsCompatSuite suite) {
    this.uri = uri;
    this.suite = suite;
  }

  public void addPassedCase(Collection<String> cases) {
    passed.addAll(cases);
  }

  public void addFailedCase(Collection<String> cases) {
    failed.addAll(cases);
  }

  public void addSkippedCase(Collection<String> cases) {
    skipped.addAll(cases);
  }

  public void merge(HdfsCompatReport other) {
    this.passed.addAll(other.passed);
    this.failed.addAll(other.failed);
    this.skipped.addAll(other.skipped);
  }

  public Collection<String> getPassedCase() {
    return passed;
  }

  public Collection<String> getFailedCase() {
    return failed;
  }

  public Collection<String> getSkippedCase() {
    return skipped;
  }

  public String getUri() {
    return this.uri;
  }

  public HdfsCompatSuite getSuite() {
    return this.suite;
  }
}
@ -0,0 +1,406 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.common;

import org.apache.commons.io.FileUtils;
import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.*;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.*;
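/**
 * Shell scope of a suite: stages the bundled test scripts and a dedicated
 * Hadoop conf dir in a local temp directory, exports HADOOP_COMPAT_* paths
 * into the environment, runs the cases via {@code prove -r cases}, and
 * collects the pass/fail/skip lists written by the scripts.
 */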
public class HdfsCompatShellScope {
  private static final Logger LOG =
      LoggerFactory.getLogger(HdfsCompatShellScope.class);
  private static final Random RANDOM = new Random();
  private final HdfsCompatEnvironment env;
  private final HdfsCompatSuite suite;
  private File stdoutDir = null;
  private File passList = null;
  private File failList = null;
  private File skipList = null;
  private Path snapshotPath = null;
  private String storagePolicy = null;
  private Method disallowSnapshot = null;

  public HdfsCompatShellScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) {
    this.env = env;
    this.suite = suite;
  }

  public HdfsCompatReport apply() throws Exception {
    File localTmpDir = null;
    try {
      localTmpDir = new File(this.env.getLocalTmpDir());
      LOG.info("Local tmp dir: " + localTmpDir.getAbsolutePath());
      return runShell(localTmpDir);
    } finally {
      try {
        if (this.disallowSnapshot != null) {
          try {
            this.disallowSnapshot.invoke(this.env.getFileSystem(),
                this.snapshotPath);
          } catch (InvocationTargetException e) {
            LOG.error("Cannot disallow snapshot", e.getCause());
          } catch (ReflectiveOperationException e) {
            LOG.error("Disallow snapshot method is invalid", e);
          }
        }
      } finally {
        FileUtils.deleteQuietly(localTmpDir);
      }
    }
  }

  private HdfsCompatReport runShell(File localTmpDir) throws Exception {
    File localDir = new File(localTmpDir, "test");
    File scriptDir = new File(localTmpDir, "scripts");
    File confDir = new File(localTmpDir, "hadoop-conf");
    copyScriptsResource(scriptDir);
    try {
      setShellLogConf(confDir);
    } catch (Exception e) {
      LOG.error("Cannot set new conf dir", e);
      confDir = null;
    }

    prepareSnapshot();
    this.storagePolicy = getStoragePolicy();
    String[] confEnv = getEnv(localDir, scriptDir, confDir);
    ExecResult result = exec(confEnv, scriptDir);
    printLog(result);
    return export();
  }

  private void copyScriptsResource(File scriptDir) throws IOException {
    Files.createDirectories(new File(scriptDir, "cases").toPath());
    copyResource("/misc.sh", new File(scriptDir, "misc.sh"));
    String[] cases = suite.getShellCases();
    for (String res : cases) {
      copyResource("/cases/" + res, new File(scriptDir, "cases/" + res));
    }
  }

  private void setShellLogConf(File confDir) throws IOException {
    final String hadoopHome = System.getenv("HADOOP_HOME");
    final String hadoopConfDir = System.getenv("HADOOP_CONF_DIR");
    if ((hadoopHome == null) || hadoopHome.isEmpty()) {
      LOG.error("HADOOP_HOME not configured");
    }
    if ((hadoopConfDir == null) || hadoopConfDir.isEmpty()) {
      throw new IOException("HADOOP_CONF_DIR not configured");
    }
    File srcDir = new File(hadoopConfDir).getAbsoluteFile();
    if (!srcDir.isDirectory()) {
      throw new IOException("HADOOP_CONF_DIR is not valid: " + srcDir);
    }

    Files.createDirectories(confDir.toPath());
    FileUtils.copyDirectory(srcDir, confDir);
    File logConfFile = new File(confDir, "log4j.properties");
    copyResource("/hadoop-compat-bench-log4j.properties", logConfFile, true);
  }

  @VisibleForTesting
  protected void copyResource(String res, File dst) throws IOException {
    copyResource(res, dst, false);
  }

  private void copyResource(String res, File dst, boolean overwrite)
      throws IOException {
    InputStream in = null;
    try {
      in = this.getClass().getResourceAsStream(res);
      if (in == null) {
        in = this.suite.getClass().getResourceAsStream(res);
      }
      if (in == null) {
        throw new IOException("Resource not found" +
            " during scripts prepare: " + res);
      }

      if (dst.exists() && !overwrite) {
        throw new IOException("Cannot overwrite existing resource file");
      }

      Files.createDirectories(dst.getParentFile().toPath());

      byte[] buf = new byte[1024];
      try (OutputStream out = new FileOutputStream(dst)) {
        int nRead = in.read(buf);
        while (nRead != -1) {
          out.write(buf, 0, nRead);
          nRead = in.read(buf);
        }
      }
    } finally {
      if (in != null) {
        in.close();
      }
    }
  }

  private void prepareSnapshot() {
    this.snapshotPath = AbstractHdfsCompatCase.getUniquePath(this.env.getBase());
    Method allowSnapshot = null;
    try {
      FileSystem fs = this.env.getFileSystem();
      fs.mkdirs(snapshotPath);
      Method allowSnapshotMethod = fs.getClass()
          .getMethod("allowSnapshot", Path.class);
      allowSnapshotMethod.setAccessible(true);
      allowSnapshotMethod.invoke(fs, snapshotPath);
      allowSnapshot = allowSnapshotMethod;

      Method disallowSnapshotMethod = fs.getClass()
          .getMethod("disallowSnapshot", Path.class);
      disallowSnapshotMethod.setAccessible(true);
      this.disallowSnapshot = disallowSnapshotMethod;
    } catch (IOException e) {
      LOG.error("Cannot prepare snapshot path", e);
    } catch (InvocationTargetException e) {
      LOG.error("Cannot allow snapshot", e.getCause());
    } catch (ReflectiveOperationException e) {
      LOG.warn("Get admin snapshot methods failed.");
    } catch (Exception e) {
      LOG.warn("Prepare snapshot failed", e);
    }
    if (allowSnapshot == null) {
      LOG.warn("No allowSnapshot method found.");
    }
    if (this.disallowSnapshot == null) {
      LOG.warn("No disallowSnapshot method found.");
    }
  }

  private String getStoragePolicy() {
    BlockStoragePolicySpi def;
    String[] policies;
    try {
      FileSystem fs = this.env.getFileSystem();
      Path base = this.env.getBase();
      fs.mkdirs(base);
      def = fs.getStoragePolicy(base);
      policies = env.getStoragePolicyNames();
    } catch (Exception e) {
      LOG.warn("Cannot get storage policy", e);
      return "Hot";
    }

    List<String> differentPolicies = new ArrayList<>();
    for (String policyName : policies) {
      if ((def == null) || !policyName.equalsIgnoreCase(def.getName())) {
        differentPolicies.add(policyName);
      }
    }
    if (differentPolicies.isEmpty()) {
      final String defPolicyName;
      if ((def == null) || (def.getName() == null)) {
        defPolicyName = "Hot";
        LOG.warn("No valid storage policy name found, use Hot.");
      } else {
        defPolicyName = def.getName();
        LOG.warn("There is only one storage policy: " + defPolicyName);
      }
      return defPolicyName;
    } else {
      return differentPolicies.get(
          RANDOM.nextInt(differentPolicies.size()));
    }
  }
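  /**
   * Builds the environment of the shell run: the inherited process env plus
   * the staged HADOOP_CONF_DIR and the HADOOP_COMPAT_* variables (the base,
   * local and snapshot URIs, the chosen storage policy, and the stdout dir
   * and pass/fail/skip list files that the case scripts report into).
   */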
  @VisibleForTesting
  protected String[] getEnv(File localDir, File scriptDir, File confDir)
      throws IOException {
    List<String> confEnv = new ArrayList<>();
    final Map<String, String> environments = System.getenv();
    for (Map.Entry<String, String> entry : environments.entrySet()) {
      confEnv.add(entry.getKey() + "=" + entry.getValue());
    }
    if (confDir != null) {
      confEnv.add("HADOOP_CONF_DIR=" + confDir.getAbsolutePath());
    }

    String timestamp = String.valueOf(System.currentTimeMillis());
    Path baseUri = new Path(this.env.getBase(), timestamp);
    File localUri = new File(localDir, timestamp).getAbsoluteFile();
    File resultDir = new File(localDir, timestamp);
    Files.createDirectories(resultDir.toPath());
    this.stdoutDir = new File(resultDir, "output").getAbsoluteFile();
    this.passList = new File(resultDir, "passed").getAbsoluteFile();
    this.failList = new File(resultDir, "failed").getAbsoluteFile();
    this.skipList = new File(resultDir, "skipped").getAbsoluteFile();
    Files.createFile(this.passList.toPath());
    Files.createFile(this.failList.toPath());
    Files.createFile(this.skipList.toPath());

    final String prefix = "HADOOP_COMPAT_";
    confEnv.add(prefix + "BASE_URI=" + baseUri);
    confEnv.add(prefix + "LOCAL_URI=" + localUri.getAbsolutePath());
    confEnv.add(prefix + "SNAPSHOT_URI=" + snapshotPath.toString());
    confEnv.add(prefix + "STORAGE_POLICY=" + storagePolicy);
    confEnv.add(prefix + "STDOUT_DIR=" + stdoutDir.getAbsolutePath());
    confEnv.add(prefix + "PASS_FILE=" + passList.getAbsolutePath());
    confEnv.add(prefix + "FAIL_FILE=" + failList.getAbsolutePath());
    confEnv.add(prefix + "SKIP_FILE=" + skipList.getAbsolutePath());
    return confEnv.toArray(new String[0]);
  }

  private ExecResult exec(String[] confEnv, File scriptDir)
      throws IOException, InterruptedException {
    Process process = Runtime.getRuntime().exec(
        "prove -r cases", confEnv, scriptDir);
    StreamPrinter out = new StreamPrinter(process.getInputStream());
    StreamPrinter err = new StreamPrinter(process.getErrorStream());
    out.start();
    err.start();
    int code = process.waitFor();
    out.join();
    err.join();
    return new ExecResult(code, out.lines, err.lines);
  }

  private void printLog(ExecResult execResult) {
    LOG.info("Shell prove\ncode: {}\nstdout:\n\t{}\nstderr:\n\t{}",
        execResult.code, String.join("\n\t", execResult.out),
        String.join("\n\t", execResult.err));
    File casesRoot = new File(stdoutDir, "cases").getAbsoluteFile();
    String[] casesDirList = casesRoot.list();
    if (casesDirList == null) {
      LOG.error("stdout/stderr root directory is invalid: " + casesRoot);
      return;
    }
    Arrays.sort(casesDirList, (o1, o2) -> {
      if (o1.length() == o2.length()) {
        return o1.compareTo(o2);
      } else {
        return o1.length() - o2.length();
      }
    });
    for (String casesDir : casesDirList) {
      printCasesLog(new File(casesRoot, casesDir).getAbsoluteFile());
    }
  }

  private void printCasesLog(File casesDir) {
    File stdout = new File(casesDir, "stdout").getAbsoluteFile();
    File stderr = new File(casesDir, "stderr").getAbsoluteFile();
    File[] stdoutFiles = stdout.listFiles();
    File[] stderrFiles = stderr.listFiles();
    Set<String> cases = new HashSet<>();
    if (stdoutFiles != null) {
      for (File c : stdoutFiles) {
        cases.add(c.getName());
      }
    }
    if (stderrFiles != null) {
      for (File c : stderrFiles) {
        cases.add(c.getName());
      }
    }
    String[] caseNames = cases.stream().sorted((o1, o2) -> {
      if (o1.length() == o2.length()) {
        return o1.compareTo(o2);
      } else {
        return o1.length() - o2.length();
      }
    }).toArray(String[]::new);
    for (String caseName : caseNames) {
      File stdoutFile = new File(stdout, caseName);
      File stderrFile = new File(stderr, caseName);
      try {
        List<String> stdoutLines = stdoutFile.exists() ?
            readLines(stdoutFile) : new ArrayList<>();
        List<String> stderrLines = stderrFile.exists() ?
            readLines(stderrFile) : new ArrayList<>();
        LOG.info("Shell case {} - #{}\nstdout:\n\t{}\nstderr:\n\t{}",
            casesDir.getName(), caseName,
            String.join("\n\t", stdoutLines),
            String.join("\n\t", stderrLines));
      } catch (Exception e) {
        LOG.warn("Read shell stdout or stderr file failed", e);
      }
    }
  }

  private HdfsCompatReport export() throws IOException {
    HdfsCompatReport report = new HdfsCompatReport();
    report.addPassedCase(readLines(this.passList));
    report.addFailedCase(readLines(this.failList));
    report.addSkippedCase(readLines(this.skipList));
    return report;
  }

  private List<String> readLines(File file) throws IOException {
    List<String> lines = new ArrayList<>();
    try (BufferedReader br = new BufferedReader(new InputStreamReader(
        new FileInputStream(file), StandardCharsets.UTF_8))) {
      String line = br.readLine();
      while (line != null) {
        lines.add(line);
        line = br.readLine();
      }
    }
    return lines;
  }

  private static final class StreamPrinter extends Thread {
    private final InputStream in;
    private final List<String> lines;

    private StreamPrinter(InputStream in) {
      this.in = in;
      this.lines = new ArrayList<>();
    }

    @Override
    public void run() {
      try (BufferedReader br = new BufferedReader(
          new InputStreamReader(in, StandardCharsets.UTF_8))) {
        String line = br.readLine();
        while (line != null) {
          this.lines.add(line);
          line = br.readLine();
        }
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
  }

  private static final class ExecResult {
    private final int code;
    private final List<String> out;
    private final List<String> err;

    private ExecResult(int code, List<String> out, List<String> err) {
      this.code = code;
      this.out = out;
      this.err = err;
    }
  }
}
@ -0,0 +1,27 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.common;


public interface HdfsCompatSuite {
  String getSuiteName();

  Class<? extends AbstractHdfsCompatCase>[] getApiCases();

  String[] getShellCases();
}
@ -0,0 +1,120 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.common;


import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Random;

public final class HdfsCompatUtil {
  private static final Logger LOG =
      LoggerFactory.getLogger(HdfsCompatUtil.class);
  private static final Random RANDOM = new Random();

  private HdfsCompatUtil() {
  }

  public static void checkImplementation(ImplementationFunction func) {
    try {
      func.apply();
    } catch (UnsupportedOperationException e) {
      // An UnsupportedOperationException marks the API as unsupported.
      throw e;
    } catch (NoSuchMethodError e) {
      // A missing method is either rethrown or wrapped as unsupported,
      // depending on HdfsCompatApiScope.SKIP_NO_SUCH_METHOD_ERROR.
      if (HdfsCompatApiScope.SKIP_NO_SUCH_METHOD_ERROR) {
        throw e;
      } else {
        throw new UnsupportedOperationException(e);
      }
    } catch (Throwable ignored) {
      // Any other outcome still counts as "implemented".
    }
  }

  public static void createFile(FileSystem fs, Path file, long fileLen)
      throws IOException {
    createFile(fs, file, true, 1024, fileLen, 1048576L, (short) 1);
  }

  public static void createFile(FileSystem fs, Path file, byte[] data)
      throws IOException {
    createFile(fs, file, true, data, 1048576L, (short) 1);
  }

  public static void createFile(FileSystem fs, Path file, boolean overwrite,
                                int bufferSize, long fileLen, long blockSize,
                                short replication) throws IOException {
    assert (bufferSize > 0);
    try (FSDataOutputStream out = fs.create(file, overwrite,
        bufferSize, replication, blockSize)) {
      if (fileLen > 0) {
        // Fill the file with random bytes, at most bufferSize per write.
        byte[] toWrite = new byte[bufferSize];
        long bytesToWrite = fileLen;
        while (bytesToWrite > 0) {
          RANDOM.nextBytes(toWrite);
          int bytesToWriteNext = (bufferSize < bytesToWrite) ?
              bufferSize : (int) bytesToWrite;
          out.write(toWrite, 0, bytesToWriteNext);
          bytesToWrite -= bytesToWriteNext;
        }
      }
    }
  }

  public static void createFile(FileSystem fs, Path file, boolean overwrite,
                                byte[] data, long blockSize,
                                short replication) throws IOException {
    try (FSDataOutputStream out = fs.create(file, overwrite,
        (data.length > 0) ? data.length : 1024, replication, blockSize)) {
      if (data.length > 0) {
        out.write(data);
      }
    }
  }

  public static byte[] readFileBuffer(FileSystem fs, Path fileName)
      throws IOException {
    try (ByteArrayOutputStream os = new ByteArrayOutputStream();
         FSDataInputStream in = fs.open(fileName)) {
      IOUtils.copyBytes(in, os, 1024, true);
      return os.toByteArray();
    }
  }

  public static void deleteQuietly(FileSystem fs, Path path,
                                   boolean recursive) {
    if (fs != null && path != null) {
      try {
        fs.delete(path, recursive);
      } catch (Throwable e) {
        LOG.warn("When deleting {}", path, e);
      }
    }
  }

  public interface ImplementationFunction {
    void apply() throws Exception;
  }
}
@ -0,0 +1,23 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * This contains the main code and definitions of the tool
 * {@link org.apache.hadoop.fs.compat.HdfsCompatTool}.
 */
package org.apache.hadoop.fs.compat.common;
@ -0,0 +1,24 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * HdfsCompatibility is a benchmark tool to quickly assess the availability
 * of Hadoop-Compatible File System APIs defined in
 * {@link org.apache.hadoop.fs.FileSystem} for a specific FS implementation.
 */
package org.apache.hadoop.fs.compat;
@ -0,0 +1,63 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.suites;

import org.apache.hadoop.fs.compat.common.AbstractHdfsCompatCase;
import org.apache.hadoop.fs.compat.common.HdfsCompatSuite;
import org.apache.hadoop.fs.compat.cases.*;

public class HdfsCompatSuiteForAll implements HdfsCompatSuite {
  @Override
  public String getSuiteName() {
    return "ALL";
  }

  @Override
  public Class<? extends AbstractHdfsCompatCase>[] getApiCases() {
    return new Class[]{
        HdfsCompatBasics.class,
        HdfsCompatAcl.class,
        HdfsCompatCreate.class,
        HdfsCompatDirectory.class,
        HdfsCompatFile.class,
        HdfsCompatLocal.class,
        HdfsCompatServer.class,
        HdfsCompatSnapshot.class,
        HdfsCompatStoragePolicy.class,
        HdfsCompatSymlink.class,
        HdfsCompatXAttr.class,
    };
  }

  @Override
  public String[] getShellCases() {
    return new String[]{
        "directory.t",
        "fileinfo.t",
        "read.t",
        "write.t",
        "remove.t",
        "attr.t",
        "copy.t",
        "move.t",
        "concat.t",
        "snapshot.t",
        "storagePolicy.t",
    };
  }
}
@ -0,0 +1,50 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.suites;

import org.apache.hadoop.fs.compat.common.AbstractHdfsCompatCase;
import org.apache.hadoop.fs.compat.common.HdfsCompatSuite;

public class HdfsCompatSuiteForShell implements HdfsCompatSuite {
  @Override
  public String getSuiteName() {
    return "Shell";
  }

  @Override
  public Class<? extends AbstractHdfsCompatCase>[] getApiCases() {
    return new Class[0];
  }

  @Override
  public String[] getShellCases() {
    return new String[]{
        "directory.t",
        "fileinfo.t",
        "read.t",
        "write.t",
        "remove.t",
        "attr.t",
        "copy.t",
        "move.t",
        "concat.t",
        "snapshot.t",
        "storagePolicy.t",
    };
  }
}
@ -0,0 +1,41 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.suites;

import org.apache.hadoop.fs.compat.common.AbstractHdfsCompatCase;
import org.apache.hadoop.fs.compat.common.HdfsCompatSuite;
import org.apache.hadoop.fs.compat.cases.HdfsCompatTpcds;

public class HdfsCompatSuiteForTpcds implements HdfsCompatSuite {
  @Override
  public String getSuiteName() {
    return "TPCDS";
  }

  @Override
  public Class<? extends AbstractHdfsCompatCase>[] getApiCases() {
    return new Class[]{
        HdfsCompatTpcds.class
    };
  }

  @Override
  public String[] getShellCases() {
    return new String[0];
  }
}
@ -0,0 +1,23 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * This contains default suites for the
 * {@link org.apache.hadoop.fs.compat.HdfsCompatTool} command.
 */
package org.apache.hadoop.fs.compat.suites;
@ -0,0 +1,24 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# log4j configuration used during build and unit tests

log4j.rootLogger=info,stderr
log4j.threshold=ALL
log4j.appender.stderr=org.apache.log4j.ConsoleAppender
log4j.appender.stderr.Target=System.err
log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
log4j.appender.stderr.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
@ -0,0 +1,101 @@
<!---
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

# Compatibility Benchmark over HCFS Implementations

<!-- MACRO{toc|fromDepth=0|toDepth=3} -->

## <a name="Overview"></a> Overview

Hadoop-Compatible File System (HCFS) is a core concept in the big data storage ecosystem,
providing unified interfaces and generally clear semantics,
and has become the de facto standard for industry storage systems to follow and conform with.
There have been a series of HCFS implementations in Hadoop,
such as S3AFileSystem for Amazon's S3 Object Store,
WASB for Microsoft's Azure Blob Storage, OSS connector for Alibaba Cloud Object Storage,
and more from storage service providers on their own.

Meanwhile, Hadoop keeps evolving, and new features are continuously contributed to the HCFS interfaces
for existing implementations to follow and update.
However, a tool is needed to check whether the features are supported by a specific implementation.

This module defines an HCFS compatibility benchmark and provides a corresponding tool
to do the compatibility assessment for an HCFS storage system.
The tool is a jar file which is executable by `hadoop jar`,
after which an HCFS compatibility report is generated showing an overall score,
and optionally a detailed list of passed and failed cases.

## <a name="Prepare"></a> Prepare
|
||||
|
||||
First of all, there must be a properly installed Hadoop environment to run `hadoop jar` command.
|
||||
See [HdfsUserGuide](./HdfsUserGuide.html) for more information about how to set up a Hadoop environment.
|
||||
Then, two things should be done before a quick benchmark assessment.
|
||||
|
||||
#### FileSystem implementation
|
||||
|
||||
There must be a Java FileSystem implementation.
The FS is known to Hadoop by the config key `fs.<scheme>.impl`. `org.apache.hadoop.fs.s3a.S3AFileSystem`
is an example, while for implementations that have not been directly supported by the Hadoop community,
an extra jar file is needed.

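For instance, registering a custom implementation for a hypothetical `mystore` scheme
(the scheme and class name below are placeholders) would look like:
```xml
<property>
  <name>fs.mystore.impl</name>
  <value>com.example.fs.MyStoreFileSystem</value>
</property>
```
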
The jar file should be placed at the end of the Hadoop classpath.
A common practice is to modify `hadoop-env.sh` to append an extra classpath like:
```shell
export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/hadoop/extra/classpath/*
```
Then we are able to place extra jar files in `/hadoop/extra/classpath/`.

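To double-check that the extra jar is actually visible to Hadoop, the `hadoop classpath`
command prints the effective classpath (the grep pattern below is only an example):
```shell
hadoop classpath | tr ':' '\n' | grep extra
```
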
#### Optional configuration

Some FS APIs may need additional information to run.
Additional information can be passed via optional configurations. Here is an example:
```xml
<property>
  <name>fs.{scheme}.compatibility.storage.policies</name>
  <value>Hot,WARM,COLD</value>
  <description>
    Storage policy names used by the HCFS compatibility benchmark tool.
    The config key is fs.{scheme}.compatibility.storage.policies.
    The config value is comma-separated storage policy names for the FS.
  </description>
</property>
```
Optional configurations are defined in `org.apache.hadoop.fs.compat.common.HdfsCompatEnvironment`.

## Usage

```shell
hadoop jar hadoop-compat-bench.jar -uri <uri> [-suite <suite-name>] [-output <output-file>]
```
This command generates a report in text format, showing an overall score
and, optionally, passed and failed case lists.

#### uri

`uri` points to the target storage service path that you'd like to evaluate.
Some new files or directories will be created under that path.

#### suite

`suite-name` corresponds to a subset of all test cases.
For example, a suite named 'shell' contains only shell command cases.
There are three default suites of the tool (see the example run below):
* ALL: run all test cases of the benchmark. This is the default suite if `-suite` is absent.
* Shell: run only shell command cases.
* TPCDS: run cases for APIs that a TPC-DS program may require.

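For example, a run restricted to shell command cases against a hypothetical HDFS path
(the URI below is a placeholder) would be:
```shell
hadoop jar hadoop-compat-bench.jar -uri hdfs://ns1/compat-bench -suite shell
```
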
#### output

`output-file` points to a local file to save a detailed compatibility report document.
The detailed report contains not only an overall score but also passed and failed case lists.
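
For instance, to run the full benchmark and save the detailed report to a local file
(the URI and file name below are placeholders):
```shell
hadoop jar hadoop-compat-bench.jar -uri hdfs://ns1/compat-bench -output /tmp/hcfs-compat-report.txt
```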
@ -0,0 +1,30 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#banner {
  height: 93px;
  background: none;
}

#bannerLeft img {
  margin-left: 30px;
  margin-top: 10px;
}

#bannerRight img {
  margin: 17px;
}
@ -0,0 +1,68 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.cases;

import org.apache.hadoop.fs.compat.common.AbstractHdfsCompatCase;
import org.apache.hadoop.fs.compat.common.HdfsCompatCase;
import org.apache.hadoop.fs.compat.common.HdfsCompatUtil;

import java.util.ArrayList;

public class HdfsCompatAclTestCases extends AbstractHdfsCompatCase {
  @HdfsCompatCase
  public void modifyAclEntries() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().modifyAclEntries(makePath("modifyAclEntries"), new ArrayList<>())
    );
  }

  @HdfsCompatCase
  public void removeAclEntries() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().removeAclEntries(makePath("removeAclEntries"), new ArrayList<>())
    );
  }

  @HdfsCompatCase
  public void removeDefaultAcl() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().removeDefaultAcl(makePath("removeDefaultAcl"))
    );
  }

  @HdfsCompatCase
  public void removeAcl() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().removeAcl(makePath("removeAcl"))
    );
  }

  @HdfsCompatCase
  public void setAcl() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().setAcl(makePath("setAcl"), new ArrayList<>())
    );
  }

  @HdfsCompatCase
  public void getAclStatus() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().getAclStatus(makePath("getAclStatus"))
    );
  }
}
@ -0,0 +1,31 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.cases;

import org.apache.hadoop.fs.compat.common.AbstractHdfsCompatCase;
import org.apache.hadoop.fs.compat.common.HdfsCompatCase;
import org.apache.hadoop.fs.compat.common.HdfsCompatUtil;

public class HdfsCompatMkdirTestCases extends AbstractHdfsCompatCase {
  @HdfsCompatCase
  public void mkdirs() {
    HdfsCompatUtil.checkImplementation(() ->
        fs().mkdirs(makePath("mkdir"))
    );
  }
}
@ -0,0 +1,61 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.common;

import org.apache.hadoop.fs.compat.HdfsCompatTool;
import org.apache.hadoop.fs.compat.hdfs.HdfsCompatMiniCluster;
import org.apache.hadoop.fs.compat.hdfs.HdfsCompatTestCommand;
import org.apache.hadoop.conf.Configuration;
import org.junit.Assert;
import org.junit.Test;

public class TestHdfsCompatDefaultSuites {
  @Test
  public void testSuiteAll() throws Exception {
    HdfsCompatMiniCluster cluster = new HdfsCompatMiniCluster();
    try {
      cluster.start();
      final String uri = cluster.getUri() + "/tmp";
      Configuration conf = cluster.getConf();
      HdfsCompatCommand cmd = new HdfsCompatTestCommand(uri, "ALL", conf);
      cmd.initialize();
      HdfsCompatReport report = cmd.apply();
      Assert.assertEquals(0, report.getFailedCase().size());
      new HdfsCompatTool(conf).printReport(report, System.out);
    } finally {
      cluster.shutdown();
    }
  }

  @Test
  public void testSuiteTpcds() throws Exception {
    HdfsCompatMiniCluster cluster = new HdfsCompatMiniCluster();
    try {
      cluster.start();
      final String uri = cluster.getUri() + "/tmp";
      Configuration conf = cluster.getConf();
      HdfsCompatCommand cmd = new HdfsCompatTestCommand(uri, "TPCDS", conf);
      cmd.initialize();
      HdfsCompatReport report = cmd.apply();
      Assert.assertEquals(0, report.getFailedCase().size());
      new HdfsCompatTool(conf).printReport(report, System.out);
    } finally {
      cluster.shutdown();
    }
  }
}
@ -0,0 +1,180 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.common;


import org.apache.hadoop.fs.compat.HdfsCompatTool;
import org.apache.hadoop.fs.compat.hdfs.HdfsCompatMiniCluster;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.compat.cases.HdfsCompatAclTestCases;
import org.apache.hadoop.fs.compat.cases.HdfsCompatMkdirTestCases;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Assert;
import org.junit.Test;

import java.io.IOException;
import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Map;

public class TestHdfsCompatFsCommand {
  @Test
  public void testDfsCompatibility() throws Exception {
    final String suite = "ALL";
    HdfsCompatMiniCluster cluster = null;
    try {
      cluster = new HdfsCompatMiniCluster();
      cluster.start();
      final String uri = cluster.getUri() + "/tmp";
      final Configuration conf = cluster.getConf();

      HdfsCompatCommand cmd = new TestCommand(uri, suite, conf);
      cmd.initialize();
      HdfsCompatReport report = cmd.apply();
      Assert.assertEquals(7, report.getPassedCase().size());
      Assert.assertEquals(0, report.getFailedCase().size());
      show(conf, report);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }

  @Test
  public void testLocalFsCompatibility() throws Exception {
    final String uri = "file:///tmp/";
    final String suite = "ALL";
    final Configuration conf = new Configuration();
    HdfsCompatCommand cmd = new TestCommand(uri, suite, conf);
    cmd.initialize();
    HdfsCompatReport report = cmd.apply();
    Assert.assertEquals(1, report.getPassedCase().size());
    Assert.assertEquals(6, report.getFailedCase().size());
    show(conf, report);
    cleanup(cmd, conf);
  }

  @Test
  public void testFsCompatibilityWithSuite() throws Exception {
    final String uri = "file:///tmp/";
    final String suite = "acl";
    final Configuration conf = new Configuration();
    HdfsCompatCommand cmd = new TestCommand(uri, suite, conf);
    cmd.initialize();
    HdfsCompatReport report = cmd.apply();
    Assert.assertEquals(0, report.getPassedCase().size());
    Assert.assertEquals(6, report.getFailedCase().size());
    show(conf, report);
    cleanup(cmd, conf);
  }

  private void show(Configuration conf, HdfsCompatReport report) throws IOException {
    new HdfsCompatTool(conf).printReport(report, System.out);
  }

  private void cleanup(HdfsCompatCommand cmd, Configuration conf) throws Exception {
    Path basePath = ((TestCommand) cmd).getBasePath();
    FileSystem fs = basePath.getFileSystem(conf);
    fs.delete(basePath, true);
  }

  private static final class TestCommand extends HdfsCompatCommand {
    private TestCommand(String uri, String suiteName, Configuration conf) {
      super(uri, suiteName, conf);
    }

    @Override
    protected Map<String, HdfsCompatSuite> getDefaultSuites() {
      Map<String, HdfsCompatSuite> defaultSuites = new HashMap<>();
      defaultSuites.put("all", new AllTestSuite());
      defaultSuites.put("mkdir", new MkdirTestSuite());
      defaultSuites.put("acl", new AclTestSuite());
      return defaultSuites;
    }

    private Path getBasePath() throws ReflectiveOperationException {
      Field apiField = HdfsCompatCommand.class.getDeclaredField("api");
      apiField.setAccessible(true);
      HdfsCompatApiScope api = (HdfsCompatApiScope) apiField.get(this);
      Field envField = api.getClass().getDeclaredField("env");
      envField.setAccessible(true);
      HdfsCompatEnvironment env = (HdfsCompatEnvironment) envField.get(api);
      return env.getBase();
    }
  }

  private static class AllTestSuite implements HdfsCompatSuite {
    @Override
    public String getSuiteName() {
      return "All (Test)";
    }

    @Override
    public Class<? extends AbstractHdfsCompatCase>[] getApiCases() {
      return new Class[]{
          HdfsCompatMkdirTestCases.class,
          HdfsCompatAclTestCases.class,
      };
    }

    @Override
    public String[] getShellCases() {
      return new String[0];
    }
  }

  private static class MkdirTestSuite implements HdfsCompatSuite {
    @Override
    public String getSuiteName() {
      return "Mkdir";
    }

    @Override
    public Class<? extends AbstractHdfsCompatCase>[] getApiCases() {
      return new Class[]{
          HdfsCompatMkdirTestCases.class,
      };
    }

    @Override
    public String[] getShellCases() {
      return new String[0];
    }
  }

  private static class AclTestSuite implements HdfsCompatSuite {
    @Override
    public String getSuiteName() {
      return "ACL";
    }

    @Override
    public Class<? extends AbstractHdfsCompatCase>[] getApiCases() {
      return new Class[]{
          HdfsCompatAclTestCases.class,
      };
    }

    @Override
    public String[] getShellCases() {
      return new String[0];
    }
  }
}
@ -0,0 +1,57 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.common;


import org.apache.hadoop.fs.compat.cases.HdfsCompatBasics;
import org.apache.hadoop.fs.FileSystem;
import org.junit.Assert;
import org.junit.Ignore;
import org.junit.Test;

import java.lang.reflect.Method;
import java.util.HashSet;
import java.util.Set;

public class TestHdfsCompatInterfaceCoverage {
  @Test
  @Ignore
  public void testFsCompatibility() {
    Set<String> publicMethods = getPublicInterfaces(FileSystem.class);
    Set<String> targets = getTargets(HdfsCompatBasics.class);
    for (String publicMethod : publicMethods) {
      Assert.assertTrue("Method not tested: " + publicMethod,
          targets.contains(publicMethod));
    }
  }

  private Set<String> getPublicInterfaces(Class<?> cls) {
    return HdfsCompatApiScope.getPublicInterfaces(cls);
  }

  private Set<String> getTargets(Class<? extends AbstractHdfsCompatCase> cls) {
    Method[] methods = cls.getDeclaredMethods();
    Set<String> targets = new HashSet<>();
    for (Method method : methods) {
      if (method.isAnnotationPresent(HdfsCompatCase.class)) {
        targets.add(method.getName());
      }
    }
    return targets;
  }
}
@ -0,0 +1,128 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.common;


import org.apache.commons.io.FileUtils;
import org.apache.hadoop.fs.compat.HdfsCompatTool;
import org.apache.hadoop.fs.compat.hdfs.HdfsCompatMiniCluster;
import org.apache.hadoop.fs.compat.hdfs.HdfsCompatTestCommand;
import org.apache.hadoop.fs.compat.hdfs.HdfsCompatTestShellScope;
import org.apache.hadoop.conf.Configuration;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

public class TestHdfsCompatShellCommand {
  private HdfsCompatMiniCluster cluster;

  @Before
  public void runCluster() throws IOException {
    this.cluster = new HdfsCompatMiniCluster();
    this.cluster.start();
  }

  @After
  public void shutdownCluster() {
    this.cluster.shutdown();
    this.cluster = null;
  }

  @Test
  public void testDfsCompatibility() throws Exception {
    final String uri = cluster.getUri() + "/tmp";
    final Configuration conf = cluster.getConf();
    HdfsCompatCommand cmd = new TestCommand(uri, conf);
    cmd.initialize();
    HdfsCompatReport report = cmd.apply();
    Assert.assertEquals(3, report.getPassedCase().size());
    Assert.assertEquals(0, report.getFailedCase().size());
    show(conf, report);
  }

  @Test
  public void testSkipCompatibility() throws Exception {
    final String uri = cluster.getUri() + "/tmp";
    final Configuration conf = cluster.getConf();
    HdfsCompatCommand cmd = new TestSkipCommand(uri, conf);
    cmd.initialize();
    HdfsCompatReport report = cmd.apply();
    Assert.assertEquals(2, report.getPassedCase().size());
    Assert.assertEquals(0, report.getFailedCase().size());
    show(conf, report);
  }

  private void show(Configuration conf, HdfsCompatReport report) throws IOException {
    new HdfsCompatTool(conf).printReport(report, System.out);
  }

  private static final class TestCommand extends HdfsCompatTestCommand {
    private TestCommand(String uri, Configuration conf) {
      super(uri, "shell", conf);
    }

    @Override
    protected HdfsCompatShellScope getShellScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) {
      return new TestShellScope(env, suite);
    }
  }

  private static final class TestSkipCommand extends HdfsCompatTestCommand {
    private TestSkipCommand(String uri, Configuration conf) {
      super(uri, "shell", conf);
    }

    @Override
    protected HdfsCompatShellScope getShellScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) {
      return new TestShellScopeForSkip(env, suite);
    }
  }

  private static final class TestShellScope extends HdfsCompatTestShellScope {
    private TestShellScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) {
      super(env, suite);
    }

    @Override
    protected void replace(File scriptDir) throws IOException {
      File casesDir = new File(scriptDir, "cases");
      FileUtils.deleteDirectory(casesDir);
      Files.createDirectories(casesDir.toPath());
      copyResource("/test-case-simple.t", new File(casesDir, "test-case-simple.t"));
    }
  }

  private static final class TestShellScopeForSkip extends HdfsCompatTestShellScope {
    private TestShellScopeForSkip(HdfsCompatEnvironment env, HdfsCompatSuite suite) {
      super(env, suite);
    }

    @Override
    protected void replace(File scriptDir) throws IOException {
      File casesDir = new File(scriptDir, "cases");
      FileUtils.deleteDirectory(casesDir);
      Files.createDirectories(casesDir.toPath());
      copyResource("/test-case-skip.t", new File(casesDir, "test-case-skip.t"));
    }
  }
}
@ -0,0 +1,114 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.hdfs;


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;


public class HdfsCompatMiniCluster {
  private static final Logger LOG =
      LoggerFactory.getLogger(HdfsCompatMiniCluster.class);

  private MiniDFSCluster cluster = null;

  public HdfsCompatMiniCluster() {
  }

  public synchronized void start() throws IOException {
    FileSystem.enableSymlinks();
    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, "true");
    conf.set(DFSConfigKeys.HADOOP_SECURITY_KEY_PROVIDER_PATH,
        "kms://http@localhost:9600/kms/foo");
    conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY, "external");
    conf.set(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, "true");
    conf.set("fs.hdfs.compatibility.privileged.user",
        UserGroupInformation.getCurrentUser().getShortUserName());
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitClusterUp();
  }

  public synchronized void shutdown() {
    if (cluster != null) {
      cluster.shutdown(true);
      cluster = null;
    }
  }

  public synchronized Configuration getConf() throws IOException {
    if (cluster == null) {
      throw new IOException("Cluster not running");
    }
    return cluster.getFileSystem().getConf();
  }

  public synchronized URI getUri() throws IOException {
    if (cluster == null) {
      throw new IOException("Cluster not running");
    }
    return cluster.getFileSystem().getUri();
  }

  public static void main(String[] args)
      throws IOException, InterruptedException {
    long duration = 5L * 60L * 1000L;
    if ((args != null) && (args.length > 0)) {
      duration = Long.parseLong(args[0]);
    }

    HdfsCompatMiniCluster cluster = new HdfsCompatMiniCluster();
    try {
      cluster.start();
      Configuration conf = cluster.getConf();

      final String confDir = System.getenv("HADOOP_CONF_DIR");
      final File confFile = new File(confDir, "core-site.xml");
      try (OutputStream out = new FileOutputStream(confFile)) {
        conf.writeXml(out);
      }

      final long endTime = System.currentTimeMillis() + duration;
      long sleepTime = getSleepTime(endTime);
      while (sleepTime > 0) {
        LOG.warn("Service running ...");
        Thread.sleep(sleepTime);
        sleepTime = getSleepTime(endTime);
      }
    } finally {
      cluster.shutdown();
    }
  }

  private static long getSleepTime(long endTime) {
    long maxTime = endTime - System.currentTimeMillis();
    return (maxTime < 5000) ? maxTime : 5000;
  }
}
@ -0,0 +1,54 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.compat.common.HdfsCompatCommand;
import org.apache.hadoop.fs.compat.common.HdfsCompatEnvironment;
import org.apache.hadoop.fs.compat.common.HdfsCompatShellScope;
import org.apache.hadoop.fs.compat.common.HdfsCompatSuite;

import java.io.IOException;
import java.lang.reflect.Field;

public class HdfsCompatTestCommand extends HdfsCompatCommand {
  public HdfsCompatTestCommand(String uri, String suiteName, Configuration conf) {
    super(uri, suiteName, conf);
  }

  @Override
  public void initialize() throws IOException, ReflectiveOperationException {
    super.initialize();
    Field shellField = HdfsCompatCommand.class.getDeclaredField("shell");
    shellField.setAccessible(true);
    HdfsCompatShellScope shell = (HdfsCompatShellScope) shellField.get(this);
    if (shell != null) {
      Field envField = shell.getClass().getDeclaredField("env");
      envField.setAccessible(true);
      HdfsCompatEnvironment env = (HdfsCompatEnvironment) envField.get(shell);
      Field suiteField = HdfsCompatCommand.class.getDeclaredField("suite");
      suiteField.setAccessible(true);
      HdfsCompatSuite suite = (HdfsCompatSuite) suiteField.get(this);
      shellField.set(this, getShellScope(env, suite));
    }
  }

  protected HdfsCompatShellScope getShellScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) {
    return new HdfsCompatTestShellScope(env, suite);
  }
}
@ -0,0 +1,113 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.compat.hdfs;

import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.fs.compat.common.HdfsCompatEnvironment;
import org.apache.hadoop.fs.compat.common.HdfsCompatShellScope;
import org.apache.hadoop.fs.compat.common.HdfsCompatSuite;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class HdfsCompatTestShellScope extends HdfsCompatShellScope {
  private final HdfsCompatEnvironment env;

  public HdfsCompatTestShellScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) {
    super(env, suite);
    this.env = env;
  }

  @Override
  protected String[] getEnv(File localDir, File scriptDir, File confDir)
      throws IOException {
    replace(scriptDir);
    File binDir = new File(scriptDir, "bin");
    copyToBin(binDir);
    confDir = new File(scriptDir, "hadoop-conf-ut");
    writeConf(confDir);
    File logConfFile = new File(confDir, "log4j.properties");
    copyResource("/hadoop-compat-bench-log4j.properties", logConfFile);

    String javaHome = System.getProperty("java.home");
    String javaBin = javaHome + File.separator + "bin" +
        File.separator + "java";
    String classpath = confDir.getAbsolutePath() + ":" +
        System.getProperty("java.class.path");
    String pathenv = System.getenv("PATH");
    if ((pathenv == null) || pathenv.isEmpty()) {
      pathenv = binDir.getAbsolutePath();
    } else {
      pathenv = binDir.getAbsolutePath() + ":" + pathenv;
    }

    List<String> confEnv = new ArrayList<>();
    Collections.addAll(confEnv, super.getEnv(localDir, scriptDir, confDir));
    confEnv.add("HADOOP_COMPAT_JAVA_BIN=" + javaBin);
    confEnv.add("HADOOP_COMPAT_JAVA_CLASSPATH=" + classpath);
    confEnv.add("HADOOP_CONF_DIR=" + confDir.getAbsolutePath());
    confEnv.add("PATH=" + pathenv);
    return confEnv.toArray(new String[0]);
  }

  @VisibleForTesting
  protected void replace(File scriptDir) throws IOException {
  }

  private void copyToBin(File binDir) throws IOException {
    Files.createDirectories(binDir.toPath());
    File hadoop = new File(binDir, "hadoop");
    File hdfs = new File(binDir, "hdfs");
    copyResource("/hadoop-compat-bench-test-shell-hadoop.sh", hadoop);
    copyResource("/hadoop-compat-bench-test-shell-hdfs.sh", hdfs);
    if (!hadoop.setReadable(true, false) ||
        !hadoop.setWritable(true, false) ||
        !hadoop.setExecutable(true, false)) {
      throw new IOException("No permission to hadoop shell.");
    }
    if (!hdfs.setReadable(true, false) ||
        !hdfs.setWritable(true, false) ||
        !hdfs.setExecutable(true, false)) {
      throw new IOException("No permission to hdfs shell.");
    }
  }

  private void writeConf(File confDir) throws IOException {
    Files.createDirectories(confDir.toPath());
    if (!confDir.setReadable(true, false) ||
        !confDir.setWritable(true, false) ||
        !confDir.setExecutable(true, false)) {
      throw new IOException("No permission to conf dir.");
    }
    File confFile = new File(confDir, "core-site.xml");
    try (OutputStream out = new FileOutputStream(confFile)) {
      this.env.getFileSystem().getConf().writeXml(out);
    }
    if (!confFile.setReadable(true, false) ||
        !confFile.setWritable(true, false) ||
        !confFile.setExecutable(true, false)) {
      throw new IOException("No permission to conf file.");
    }
  }
}
@ -0,0 +1,29 @@
#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

cmd="${1}"
shift

if [ X"${cmd}" != X"fs" ]; then
  exit 1
fi

javaBin="${HADOOP_COMPAT_JAVA_BIN}"
javaCp="${HADOOP_COMPAT_JAVA_CLASSPATH}"
fsShell="org.apache.hadoop.fs.FsShell"

$javaBin -cp "${javaCp}" "${fsShell}" "$@"
@ -0,0 +1,33 @@
#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

cmd="${1}"
shift

if [ X"${cmd}" != X"dfs" ] && [ X"${cmd}" != X"storagepolicies" ]; then
  exit 1
fi

javaBin="${HADOOP_COMPAT_JAVA_BIN}"
javaCp="${HADOOP_COMPAT_JAVA_CLASSPATH}"
if [ X"${cmd}" = X"dfs" ]; then
  fsShell="org.apache.hadoop.fs.FsShell"
else
  fsShell="org.apache.hadoop.hdfs.tools.StoragePolicyAdmin"
fi

"${javaBin}" -cp "${javaCp}" "${fsShell}" "$@"
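The hdfs wrapper dispatches on the sub-command: "dfs" maps to FsShell, "storagepolicies" to StoragePolicyAdmin, everything else fails. A hedged sketch of the three paths, assuming the same env vars as for the hadoop wrapper:

# Hedged dispatch sketch; classpath resolution as above.
hdfs dfs -ls /                      # -> java -cp ... org.apache.hadoop.fs.FsShell -ls /
hdfs storagepolicies -listPolicies  # -> java -cp ... org.apache.hadoop.hdfs.tools.StoragePolicyAdmin -listPolicies
hdfs version                        # exits 1: only dfs/storagepolicies are mocked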
@ -0,0 +1,26 @@
#!/bin/sh

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

. "$(dirname "$0")/../misc.sh"

echo "Hello World!" > "${localDir}/dat"

echo "1..3"

expect_ret "mkdir (ut)" 0 hadoop fs -mkdir -p "${baseDir}/dir"
expect_ret "put (ut)" 0 hadoop fs -put "${localDir}/dat" "${baseDir}/dir/"
expect_ret "rm (ut)" 0 hadoop fs -rm -r -skipTrash "${baseDir}/dir"
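Both case scripts source misc.sh, which this diff does not include. Below is a minimal sketch of an expect_ret helper consistent with the call sites above, assuming TAP-style output to match the "1..3" plan lines; the real helper in misc.sh may differ.

# Hypothetical expect_ret, consistent with its usage above; the real
# implementation lives in misc.sh and may differ.
expect_ret() {
  name="$1"; expected="$2"; shift 2
  "$@" >/dev/null 2>&1
  actual=$?
  if [ "${actual}" -eq "${expected}" ]; then
    echo "ok - ${name}"
  else
    echo "not ok - ${name} (expected ret ${expected}, got ${actual})"
  fi
}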
@ -0,0 +1,24 @@
#!/bin/sh

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

. "$(dirname "$0")/../misc.sh"

echo "1..3"

expect_ret "mkdir (ut)" 0 hadoop fs -mkdir -p "${baseDir}/dir"
expect_ret "nonExistCommand (ut)" 0 hadoop fs -nonExistCommand "${baseDir}/dir"
expect_ret "rm (ut)" 0 hadoop fs -rm -r -skipTrash "${baseDir}/dir"
@ -52,6 +52,7 @@
    <module>hadoop-aliyun</module>
    <module>hadoop-fs2img</module>
    <module>hadoop-benchmark</module>
    <module>hadoop-compat-bench</module>
  </modules>

  <build>
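With the module registered above, the benchmark builds along with the rest of hadoop-tools. A targeted build might look like the following; these are standard Maven flags, not commands taken from the patch.

# Hedged sketch: build just this module and the modules it depends on.
mvn -pl hadoop-tools/hadoop-compat-bench -am package -DskipTests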