ViewFileSystem.java:
/**
+ * Returns false, as ViewFileSystem does not support automatically adding a
+ * fallback link when no mount points are configured.
*/
- String getType() {
- return FsConstants.VIEWFS_TYPE;
+ boolean supportAutoAddingFallbackOnNoMounts() {
+ return false;
}
+
/**
* Called after a new FileSystem instance is constructed.
* @param theUri a uri whose authority section names the host, port, etc. for
@@ -293,7 +294,7 @@ public void initialize(final URI theUri, final Configuration conf)
try {
myUri = new URI(getScheme(), authority, "/", null, null);
boolean initingUriAsFallbackOnNoMounts =
- !FsConstants.VIEWFS_TYPE.equals(getType());
+ supportAutoAddingFallbackOnNoMounts();
fsState = new InodeTree<FileSystem>(conf, tableName, myUri,
initingUriAsFallbackOnNoMounts) {

ViewFileSystemOverloadScheme.java:
/**
+ * By default returns true, as ViewFileSystemOverloadScheme supports auto
+ * adding fallback on no mounts.
*/
- String getType() {
- return FsConstants.VIEWFSOS_TYPE;
+ public boolean supportAutoAddingFallbackOnNoMounts() {
+ return this.supportAutoAddingFallbackOnNoMounts;
+ }
+
+ /**
+ * Sets whether to add fallback automatically when no mount points are found.
+ */
+ public void setSupportAutoAddingFallbackOnNoMounts(
+ boolean addAutoFallbackOnNoMounts) {
+ this.supportAutoAddingFallbackOnNoMounts = addAutoFallbackOnNoMounts;
}
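Taken together with the ViewFileSystem hunk above, this toggle controls whether initialize() treats the initializing URI itself as a fallback when no mount points are configured. A minimal sketch of the two behaviors (a sketch only; hdfs://ns1 is an illustrative cluster URI and the Configuration is assumed to have no mount-table entries):

    // Sketch: default behavior, auto-adding fallback enabled.
    ViewFileSystemOverloadScheme autoFs = new ViewFileSystemOverloadScheme();
    // Succeeds even with no mount points: the URI itself becomes the fallback.
    autoFs.initialize(URI.create("hdfs://ns1/"), conf);

    // Sketch: auto-adding fallback disabled.
    ViewFileSystemOverloadScheme strictFs = new ViewFileSystemOverloadScheme();
    strictFs.setSupportAutoAddingFallbackOnNoMounts(false);
    // With no mount points configured, initialization is expected to throw
    // IOException, which is exactly what ViewDistributedFileSystem (added
    // later in this patch) relies on to fall back to plain DFS behavior.
    strictFs.initialize(URI.create("hdfs://ns1/"), conf);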
@Override
@@ -287,4 +296,62 @@ public FileSystem getRawFileSystem(Path path, Configuration conf)
}
}
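The next addition exposes mount resolution as a public API, getMountPathInfo. A caller-side sketch (a sketch only; the accessor names getTargetFs() and getPathOnTarget() match the recoverLease override later in this patch):

    // Sketch: resolve a path through the mount table, then address the
    // backing file system directly with the remaining path.
    ViewFileSystemOverloadScheme.MountPathInfo<FileSystem> info =
        vfs.getMountPathInfo(new Path("/tmp/file1"), conf);
    FileSystem targetFs = info.getTargetFs();  // fs backing the mount link
    Path onTarget = info.getPathOnTarget();    // remaining path within it
    targetFs.open(onTarget).close();           // illustrative use only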
+ /**
+ * Gets the mount path info, which contains the target file system and
+ * remaining path to pass to the target file system.
+ */
+ public MountPathInfo<FileSystem> getMountPathInfo(Path path,
+ Configuration conf) throws IOException {
...

ViewDistributedFileSystem.java (new file; Apache License header omitted):
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
+import org.apache.hadoop.fs.CacheFlag;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.FsStatus;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.PartialListing;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.PathHandle;
+import org.apache.hadoop.fs.QuotaUsage;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.viewfs.ViewFileSystem;
+import org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ECTopologyVerifierResult;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsPathHandle;
+import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
+import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
+import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.io.MultipleIOException;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.token.DelegationTokenIssuer;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.Progressable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+
+/**
+ * ViewDistributedFileSystem is an extension of DistributedFileSystem with
+ * additional mounting functionality. The goal is to provide better API
+ * compatibility for HDFS users when using a mounting file system
+ * ({@link ViewFileSystemOverloadScheme}), which inherits its mounting
+ * functionality from ViewFileSystem.
+ * Users who enable ViewFileSystemOverloadScheme by setting
+ * fs.hdfs.impl=org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme can
+ * now set fs.hdfs.impl=org.apache.hadoop.hdfs.ViewDistributedFileSystem
+ * instead, so that HDFS users get a closely compatible API along with mount
+ * functionality. All other schemes can continue to use the
+ * ViewFileSystemOverloadScheme class directly for mount functionality. Note
+ * that ViewFileSystemOverloadScheme provides only the {@link ViewFileSystem}
+ * APIs.
+ * If a user configures this class but no mount points, it simply behaves
+ * like the existing DistributedFileSystem. If a user configures both
+ * fs.hdfs.impl to this class and mount configurations, then calls to the
+ * APIs available in this class, which are just DFS APIs, will be delegated
+ * to the viewfs functionality. Note that APIs without a path argument
+ * (ex: isInSafeMode) are delegated to the default file system only, that
+ * is, the configured fallback link. If you want to make such API calls on a
+ * specific child file system, you may want to initialize it separately and
+ * call it directly. With ViewDistributedFileSystem, we strongly recommend
+ * configuring linkFallback when you add mount links, and we recommend
+ * pointing it to your base cluster, usually your current fs.defaultFS if
+ * that points to hdfs.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class ViewDistributedFileSystem extends DistributedFileSystem {
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(ViewDistributedFileSystem.class);
+
+ // A mounting file system.
+ private ViewFileSystemOverloadScheme vfs;
+ // A default DFS, which should have been set via linkFallback.
+ private DistributedFileSystem defaultDFS;
+
+ @Override
+ public void initialize(URI uri, Configuration conf) throws IOException {
+ super.initialize(uri, conf);
+ try {
+ this.vfs = tryInitializeMountingViewFs(uri, conf);
+ } catch (IOException ioe) {
+ LOGGER.debug("Mount tree initialization failed with the reason => {}."
+ + " Falling back to regular DFS initialization. Please"
+ + " re-initialize the fs after updating mount point.",
+ ioe.getMessage());
+ // The earlier super.initialize skipped the dfsClient init and
+ // setWorkingDirectory because we planned to initialize vfs. Since vfs
+ // init failed, let's init dfsClient now.
+ super.initDFSClient(uri, conf);
+ super.setWorkingDirectory(super.getHomeDirectory());
+ return;
+ }
+
+ setConf(conf);
+ // A child DFS with the currently initialized URI. This must be the same
+ // as the fallback fs. The fallback must point to the root of your file
+ // systems. Some APIs (without a path argument, for example isInSafeMode)
+ // are supported only on the base cluster file system; only those APIs
+ // will use this fs.
+ defaultDFS = (DistributedFileSystem) this.vfs.getFallbackFileSystem();
+ // Please don't access internal dfs client directly except in tests.
+ dfs = (defaultDFS != null) ? defaultDFS.dfs : null;
+ super.setWorkingDirectory(this.vfs.getHomeDirectory());
+ }
+
+ @Override
+ void initDFSClient(URI uri, Configuration conf) throws IOException {
+ // Since we plan to initialize vfs in this class, we will not need to
+ // initialize DFS client.
+ }
+
+ public ViewDistributedFileSystem() {
+ }
+
+ private ViewFileSystemOverloadScheme tryInitializeMountingViewFs(URI theUri,
+ Configuration conf) throws IOException {
+ ViewFileSystemOverloadScheme viewFs = new ViewFileSystemOverloadScheme();
+ viewFs.setSupportAutoAddingFallbackOnNoMounts(false);
+ viewFs.initialize(theUri, conf);
+ return viewFs;
+ }
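For reference, wiring this class up end to end looks roughly like the following sketch (the mount table name "ns1", the paths, and the URIs are illustrative; the ConfigUtil helpers mirror the test at the end of this patch):

    // Sketch: configure one mount link plus the strongly recommended
    // fallback to the base cluster (see the class javadoc above).
    Configuration conf = new Configuration();
    // Route the hdfs scheme through the mounting DFS implementation.
    conf.set("fs.hdfs.impl", ViewDistributedFileSystem.class.getName());
    ConfigUtil.addLink(conf, "ns1", "/data",
        URI.create("hdfs://remote-ns/data"));
    ConfigUtil.addLinkFallback(conf, "ns1", URI.create("hdfs://ns1/"));
    // Paths now resolve through the mount table; /data goes to remote-ns.
    FileSystem fs = FileSystem.get(URI.create("hdfs://ns1/"), conf);
    fs.listStatus(new Path("/data"));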
+
+ @Override
+ public URI getUri() {
+ if (this.vfs == null) {
+ return super.getUri();
+ }
+ return this.vfs.getUri();
+ }
+
+ @Override
+ public String getScheme() {
+ if (this.vfs == null) {
+ return super.getScheme();
+ }
+ return this.vfs.getScheme();
+ }
+
+ @Override
+ public Path getWorkingDirectory() {
+ if (this.vfs == null) {
+ return super.getWorkingDirectory();
+ }
+ return this.vfs.getWorkingDirectory();
+ }
+
+ @Override
+ public void setWorkingDirectory(Path dir) {
+ if (this.vfs == null) {
+ super.setWorkingDirectory(dir);
+ return;
+ }
+ this.vfs.setWorkingDirectory(dir);
+ }
+
+ @Override
+ public Path getHomeDirectory() {
+ if (super.dfs == null) {
+ return null;
+ }
+ if (this.vfs == null) {
+ return super.getHomeDirectory();
+ }
+ return this.vfs.getHomeDirectory();
+ }
+
+ /**
+ * Returns hedged read metrics for the default cluster only.
+ */
+ @Override
+ public DFSHedgedReadMetrics getHedgedReadMetrics() {
+ if (this.vfs == null) {
+ return super.getHedgedReadMetrics();
+ }
+ checkDefaultDFS(defaultDFS, "getHedgedReadMetrics");
+ return defaultDFS.getHedgedReadMetrics();
+ }
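The checkDefaultDFS helper is not part of this fragment (nor is the checkDFS used by recoverLease below); a plausible sketch of the guard, with a hypothetical body the real helper may phrase differently:

    // Hypothetical sketch: fail fast when a path-less DFS API is invoked
    // but no fallback (default) DFS was configured.
    private static void checkDefaultDFS(FileSystem fs, String methodName) {
      if (fs == null) {
        throw new UnsupportedOperationException("No default DFS found to"
            + " invoke " + methodName
            + ". Please configure a linkFallback mount link.");
      }
    }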
+
+ @Override
+ public BlockLocation[] getFileBlockLocations(FileStatus fs, long start,
+ long len) throws IOException {
+ if (this.vfs == null) {
+ return super.getFileBlockLocations(fs, start, len);
+ }
+ return this.vfs.getFileBlockLocations(fs, start, len);
+ }
+
+ @Override
+ public BlockLocation[] getFileBlockLocations(Path p, final long start,
+ final long len) throws IOException {
+ if (this.vfs == null) {
+ return super.getFileBlockLocations(p, start, len);
+ }
+ return this.vfs.getFileBlockLocations(p, start, len);
+ }
+
+ @Override
+ public void setVerifyChecksum(final boolean verifyChecksum) {
+ if (this.vfs == null) {
+ super.setVerifyChecksum(verifyChecksum);
+ return;
+ }
+ this.vfs.setVerifyChecksum(verifyChecksum);
+ }
+
+ @Override
+ public boolean recoverLease(final Path f) throws IOException {
+ if (this.vfs == null) {
+ return super.recoverLease(f);
+ }
+ ViewFileSystemOverloadScheme.MountPathInfo<FileSystem> mountPathInfo =
+ this.vfs.getMountPathInfo(f, getConf());
+ checkDFS(mountPathInfo.getTargetFs(), "recoverLease");
+ return ((DistributedFileSystem) mountPathInfo.getTargetFs())
+ .recoverLease(mountPathInfo.getPathOnTarget());
+ }
...

TestCacheDirectivesWithViewDFS.java (new file; Apache License header omitted):
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.viewfs.ConfigUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.ViewDistributedFileSystem;
+
+import java.io.IOException;
+import java.net.URI;
+
+public class TestCacheDirectivesWithViewDFS extends TestCacheDirectives {
+
+ @Override
+ public DistributedFileSystem getDFS() throws IOException {
+ Configuration conf = getConf();
+ conf.set("fs.hdfs.impl", ViewDistributedFileSystem.class.getName());
+ URI defaultFSURI =
+ URI.create(conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY));
+ ConfigUtil.addLinkFallback(conf, defaultFSURI.getHost(),
+ new Path(defaultFSURI.toString()).toUri());
+ ConfigUtil.addLink(conf, defaultFSURI.getHost(), "/tmp",
+ new Path(defaultFSURI.toString()).toUri());
+ return super.getDFS();
+ }
+
+ @Override
+ public DistributedFileSystem getDFS(MiniDFSCluster cluster, int nnIdx)
+ throws IOException {
+ Configuration conf = cluster.getConfiguration(nnIdx);
+ conf.set("fs.hdfs.impl", ViewDistributedFileSystem.class.getName());
+ URI uri = cluster.getURI(0);
+ ConfigUtil.addLinkFallback(conf, uri.getHost(), uri);
+ ConfigUtil.addLink(conf, uri.getHost(), "/tmp", uri);
+ return cluster.getFileSystem(0);
+ }
+}
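For readers who prefer raw properties over the ConfigUtil helpers, the overrides above translate to roughly the following (a sketch; property names assume the standard fs.viewfs.mounttable prefix, and <host> is a placeholder for the NameNode host used as the mount table name):

    conf.set("fs.hdfs.impl", ViewDistributedFileSystem.class.getName());
    conf.set("fs.viewfs.mounttable.<host>.linkFallback", "hdfs://<host>/");
    conf.set("fs.viewfs.mounttable.<host>.link./tmp", "hdfs://<host>/");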