HDFS-11968. ViewFS: StoragePolicies commands fail with HDFS federation. Contributed by Mukul Kumar Singh.

Arpit Agarwal 2017-10-03 11:23:40 -07:00
parent 4d5dd75b60
commit b91305119b
5 changed files with 168 additions and 29 deletions
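
Background: these admin commands previously obtained a DistributedFileSystem through AdminHelper.getDFS(), so they failed outright when fs.defaultFS pointed at a federated viewfs:// mount table (or at WebHDFS). After this change the commands resolve the path through the generic FileSystem API. A minimal, illustrative sketch of driving the tool against a viewfs mount table (the class name, NameNode addresses and mount points below are hypothetical, not part of this commit):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.ConfigUtil;
import org.apache.hadoop.hdfs.tools.StoragePolicyAdmin;
import org.apache.hadoop.util.ToolRunner;

public class ViewFsStoragePolicyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical federated mount table with two namespaces.
    ConfigUtil.addLink(conf, "cluster", "/foo",
        new Path("hdfs://nn1:8020/user1").toUri());
    ConfigUtil.addLink(conf, "cluster", "/hdfs2",
        new Path("hdfs://nn2:8020/user2").toUri());
    conf.set("fs.defaultFS", "viewfs://cluster");

    // Same entry point the new tests exercise via DFSTestUtil.toolRun();
    // 0 on success, non-zero on failure.
    StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
    int ret = ToolRunner.run(admin, new String[] {
        "-setStoragePolicy", "-path", "/foo/data", "-policy", "COLD"});
    System.exit(ret);
  }
}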

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java

@@ -407,7 +407,8 @@ ResolveResult<T> resolve(final String p, final boolean resolveLastComponent)
         for (int j = 1; j <= i; ++j) {
           failedAt.append('/').append(path[j]);
         }
-        throw (new FileNotFoundException(failedAt.toString()));
+        throw (new FileNotFoundException("File/Directory does not exist: "
+            + failedAt.toString()));
       }
       if (nextInode instanceof INodeLink) {

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java

@@ -19,8 +19,10 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -29,6 +31,7 @@
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
@@ -100,11 +103,12 @@ public String getLongUsage() {
     @Override
     public int run(Configuration conf, List<String> args) throws IOException {
-      final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+      final FileSystem fs = FileSystem.get(conf);
       try {
-        Collection<BlockStoragePolicy> policies = dfs.getAllStoragePolicies();
+        Collection<? extends BlockStoragePolicySpi> policies =
+            fs.getAllStoragePolicies();
         System.out.println("Block Storage Policies:");
-        for (BlockStoragePolicy policy : policies) {
+        for (BlockStoragePolicySpi policy : policies) {
           if (policy != null) {
             System.out.println("\t" + policy);
           }
         }
@@ -149,32 +153,43 @@ public int run(Configuration conf, List<String> args) throws IOException {
       }
       Path p = new Path(path);
-      final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
+      final FileSystem fs = FileSystem.get(conf);
       try {
-        HdfsFileStatus status = dfs.getClient().getFileInfo(
-            Path.getPathWithoutSchemeAndAuthority(p).toString());
-        if (status == null) {
+        FileStatus status;
+        try {
+          status = fs.getFileStatus(p);
+        } catch (FileNotFoundException e) {
           System.err.println("File/Directory does not exist: " + path);
           return 2;
         }
-        byte storagePolicyId = status.getStoragePolicy();
-        if (storagePolicyId == HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
-          System.out.println("The storage policy of " + path + " is unspecified");
-          return 0;
-        }
-        Collection<BlockStoragePolicy> policies = dfs.getAllStoragePolicies();
-        for (BlockStoragePolicy policy : policies) {
-          if (policy.getId() == storagePolicyId) {
-            System.out.println("The storage policy of " + path + ":\n" + policy);
+        if (status instanceof HdfsFileStatus) {
+          byte storagePolicyId = ((HdfsFileStatus)status).getStoragePolicy();
+          if (storagePolicyId ==
+              HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
+            System.out.println("The storage policy of " + path
+                + " is unspecified");
             return 0;
           }
+          Collection<? extends BlockStoragePolicySpi> policies =
+              fs.getAllStoragePolicies();
+          for (BlockStoragePolicySpi policy : policies) {
+            if (policy instanceof BlockStoragePolicy) {
+              if (((BlockStoragePolicy)policy).getId() == storagePolicyId) {
+                System.out.println("The storage policy of " + path
+                    + ":\n" + policy);
+                return 0;
+              }
+            }
+          }
         }
+        System.err.println(getName() + " is not supported for filesystem "
+            + fs.getScheme() + " on path " + path);
+        return 2;
       } catch (Exception e) {
         System.err.println(AdminHelper.prettifyException(e));
         return 2;
       }
-      System.err.println("Cannot identify the storage policy for " + path);
-      return 2;
     }
   }
@@ -218,9 +233,9 @@ public int run(Configuration conf, List<String> args) throws IOException {
         return 1;
       }
       Path p = new Path(path);
-      final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
+      final FileSystem fs = FileSystem.get(conf);
       try {
-        dfs.setStoragePolicy(p, policyName);
+        fs.setStoragePolicy(p, policyName);
         System.out.println("Set storage policy " + policyName + " on " + path);
       } catch (Exception e) {
         System.err.println(AdminHelper.prettifyException(e));
@@ -264,9 +279,9 @@ public int run(Configuration conf, List<String> args) throws IOException {
       }
       Path p = new Path(path);
-      final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
+      final FileSystem fs = FileSystem.get(conf);
       try {
-        dfs.unsetStoragePolicy(p);
+        fs.unsetStoragePolicy(p);
         System.out.println("Unset storage policy from " + path);
       } catch (Exception e) {
         System.err.println(AdminHelper.prettifyException(e));
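
Note that the rewritten commands only rely on the storage-policy methods declared on the generic FileSystem class; implementations that do not override them throw UnsupportedOperationException, which the tool surfaces through AdminHelper.prettifyException(). A short sketch of that surface (the path is illustrative):

FileSystem fs = FileSystem.get(conf);
Collection<? extends BlockStoragePolicySpi> policies = fs.getAllStoragePolicies();
fs.setStoragePolicy(new Path("/data/archive"), "COLD");
fs.unsetStoragePolicy(new Path("/data/archive"));

For -getStoragePolicy, a storage policy id is only available when getFileStatus() returns an HdfsFileStatus; otherwise the command prints the "is not supported for filesystem" message and exits with 2, which is what the new viewfs test below asserts for the mount-table root.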

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java

@@ -18,11 +18,12 @@
 package org.apache.hadoop.hdfs.tools;
 
 import java.io.IOException;
+import java.net.URISyntaxException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
@@ -38,12 +39,12 @@ public class TestStoragePolicyCommands {
   private static final short REPL = 1;
   private static final int SIZE = 128;
 
-  private static Configuration conf;
-  private static MiniDFSCluster cluster;
-  private static DistributedFileSystem fs;
+  protected static Configuration conf;
+  protected static MiniDFSCluster cluster;
+  protected static FileSystem fs;
 
   @Before
-  public void clusterSetUp() throws IOException {
+  public void clusterSetUp() throws IOException, URISyntaxException {
     conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL).build();
     cluster.waitActive();

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFSStoragePolicyCommands.java

@@ -0,0 +1,80 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.viewfs.ConfigUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.junit.Before;
import org.junit.Test;

import java.io.IOException;

/**
 * Test StoragePolicyAdmin commands with ViewFileSystem.
 */
public class TestViewFSStoragePolicyCommands extends TestStoragePolicyCommands {

  @Before
  public void clusterSetUp() throws IOException {
    conf = new HdfsConfiguration();
    String clusterName = "cluster";
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
        .numDataNodes(2)
        .build();
    cluster.waitActive();

    DistributedFileSystem hdfs1 = cluster.getFileSystem(0);
    DistributedFileSystem hdfs2 = cluster.getFileSystem(1);
    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
        FsConstants.VIEWFS_SCHEME + "://" + clusterName);

    Path base1 = new Path("/user1");
    Path base2 = new Path("/user2");
    hdfs1.delete(base1, true);
    hdfs2.delete(base2, true);
    hdfs1.mkdirs(base1);
    hdfs2.mkdirs(base2);

    ConfigUtil.addLink(conf, clusterName, "/foo",
        hdfs1.makeQualified(base1).toUri());
    ConfigUtil.addLink(conf, clusterName, "/hdfs2",
        hdfs2.makeQualified(base2).toUri());
    fs = FileSystem.get(conf);
  }

  /**
   * Storage policy operation on the viewfs root should fail.
   */
  @Test
  public void testStoragePolicyRoot() throws Exception {
    final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /", 2,
        "is not supported for filesystem viewfs on path /");
  }
}
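
For reference, the two ConfigUtil.addLink() calls in clusterSetUp() above are shorthand for the standard viewfs mount-table keys; they expand to roughly the following (same mount-table name and targets as in the test):

conf.set("fs.viewfs.mounttable.cluster.link./foo",
    hdfs1.makeQualified(base1).toUri().toString());
conf.set("fs.viewfs.mounttable.cluster.link./hdfs2",
    hdfs2.makeQualified(base2).toUri().toString());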

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestWebHDFSStoragePolicyCommands.java

@@ -0,0 +1,42 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;

import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
import org.junit.Before;

import java.io.IOException;
import java.net.URISyntaxException;

/**
 * Test StoragePolicyAdmin commands with WebHDFS.
 */
public class TestWebHDFSStoragePolicyCommands
    extends TestStoragePolicyCommands {

  @Before
  public void clusterSetUp() throws IOException, URISyntaxException {
    super.clusterSetUp();
    fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsConstants.WEBHDFS_SCHEME);
    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
        fs.getUri().toString());
  }
}