diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
index efcfd9f687..c622f12c92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
@@ -236,3 +236,8 @@ Branch-2802 Snapshot (Unreleased)
   szetszwo)
 
   HDFS-4692. Use timestamp as default snapshot names. (szetszwo)
+
+  HDFS-4666. Define ".snapshot" as a reserved inode name so that users cannot
+  create a file/directory with ".snapshot" as the name. If ".snapshot" is used
+  in a previous version of HDFS, it must be renamed before upgrade; otherwise,
+  upgrade will fail. (szetszwo)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FSLimitException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FSLimitException.java
index 2ee11f0c37..1c3e266b42 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FSLimitException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FSLimitException.java
@@ -97,4 +97,15 @@ public abstract class FSLimitException extends QuotaExceededException {
       " is exceeded: limit=" + quota + " items=" + count;
     }
   }
+
+  /** The given name is illegal. */
+  public static final class IllegalNameException extends FSLimitException {
+    public static final long serialVersionUID = 1L;
+
+    public IllegalNameException() {}
+
+    public IllegalNameException(String msg) {
+      super(msg);
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index a296942f10..af6fc76d5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -18,7 +18,9 @@
 package org.apache.hadoop.hdfs.protocol;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 
 /************************************
@@ -107,4 +109,10 @@ public class HdfsConstants {
    * A special path component contained in the path for a snapshot file/dir
    */
   public static final String DOT_SNAPSHOT_DIR = ".snapshot";
+
+  public static final byte[] DOT_SNAPSHOT_DIR_BYTES
+      = DFSUtil.string2Bytes(DOT_SNAPSHOT_DIR);
+
+  public static final String SEPARATOR_DOT_SNAPSHOT_DIR
+      = Path.SEPARATOR + DOT_SNAPSHOT_DIR;
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
index 8bd26ae167..58667060c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
@@ -98,7 +98,7 @@ public class LayoutVersion {
         "add OP_UPDATE_BLOCKS"),
     RESERVED_REL1_2_0(-41, -32, "Reserved for release 1.2.0", true, CONCAT),
     ADD_INODE_ID(-42, -40, "Assign a unique inode id for each inode", false),
-    SNAPSHOT(-43, -42, "Support for snapshot feature", false);
+    SNAPSHOT(-43, "Support for snapshot feature");
 
     final int lv;
     final int ancestorLV;
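For illustration only (this example is not part of the patch): a minimal sketch of the client-visible effect of the reserved name, assuming a MiniDFSCluster built from this branch. Creating an inode literally named ".snapshot" is rejected by the NameNode with the new FSLimitException.IllegalNameException, which the client sees wrapped in a RemoteException; ordinary names are unaffected.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class ReservedDotSnapshotExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      fs.mkdirs(new Path("/user/foo"));             // ordinary names still work
      try {
        fs.mkdirs(new Path("/user/foo/.snapshot")); // reserved: the NameNode refuses
      } catch (IOException e) {
        // surfaced as a RemoteException wrapping IllegalNameException
        System.out.println("rejected as expected: " + e.getMessage());
      }
    } finally {
      cluster.shutdown();
    }
  }
}
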
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 650e29745e..67ef05cf49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -23,6 +23,7 @@ import java.io.Closeable;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Condition;
@@ -41,11 +42,12 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.FSLimitException;
+import org.apache.hadoop.hdfs.protocol.FSLimitException.IllegalNameException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -1355,7 +1357,7 @@ public class FSDirectory implements Closeable {
 
     readLock();
     try {
-      if (srcs.endsWith(Path.SEPARATOR + HdfsConstants.DOT_SNAPSHOT_DIR)) {
+      if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
         return getSnapshotsListing(srcs, startAfter);
       }
       final INodesInPath inodesInPath = rootDir.getLastINodeInPath(srcs, true);
@@ -1393,10 +1395,10 @@ public class FSDirectory implements Closeable {
    */
   private DirectoryListing getSnapshotsListing(String src, byte[] startAfter)
       throws UnresolvedLinkException, IOException {
-    assert hasReadLock();
-    final String dotSnapshot = Path.SEPARATOR + HdfsConstants.DOT_SNAPSHOT_DIR;
-    Preconditions.checkArgument(src.endsWith(dotSnapshot),
-        src + " does not end with " + dotSnapshot);
+    Preconditions.checkState(hasReadLock());
+    Preconditions.checkArgument(
+        src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
+        "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
 
     final String dirPath = normalizePath(src.substring(0,
         src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));
@@ -1428,7 +1430,7 @@ public class FSDirectory implements Closeable {
     String srcs = normalizePath(src);
     readLock();
     try {
-      if (srcs.endsWith(Path.SEPARATOR + HdfsConstants.DOT_SNAPSHOT_DIR)) {
+      if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
         return getFileInfo4DotSnapshot(srcs);
       }
       final INodesInPath inodesInPath = rootDir.getLastINodeInPath(srcs, resolveLink);
@@ -1442,9 +1444,9 @@ public class FSDirectory implements Closeable {
 
   private HdfsFileStatus getFileInfo4DotSnapshot(String src)
       throws UnresolvedLinkException {
-    final String dotSnapshot = Path.SEPARATOR + HdfsConstants.DOT_SNAPSHOT_DIR;
-    Preconditions.checkArgument(src.endsWith(dotSnapshot),
-        src + " does not end with " + dotSnapshot);
+    Preconditions.checkArgument(
+        src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
+        "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
 
     final String dirPath = normalizePath(src.substring(0,
         src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));
@@ -1927,38 +1929,47 @@ public class FSDirectory implements Closeable {
     verifyQuota(dst, dstIndex, delta.get(Quota.NAMESPACE),
         delta.get(Quota.DISKSPACE), src[i - 1]);
   }
+
+  /** Verify if the snapshot name is legal. */
+  void verifySnapshotName(String snapshotName, String path)
+      throws PathComponentTooLongException, IllegalNameException {
+    final byte[] bytes = DFSUtil.string2Bytes(snapshotName);
+    verifyINodeName(bytes);
+    verifyMaxComponentLength(bytes, path, 0);
+  }
 
-  /**
-   * Verify that filesystem limit constraints are not violated
-   */
-  void verifyFsLimits(INode[] pathComponents, int pos, INode child)
-      throws FSLimitException {
-    verifyMaxComponentLength(child.getLocalName(), pathComponents, pos);
-    verifyMaxDirItems(pathComponents, pos);
+  /** Verify if the inode name is legal. */
+  void verifyINodeName(byte[] childName) throws IllegalNameException {
+    if (Arrays.equals(HdfsConstants.DOT_SNAPSHOT_DIR_BYTES, childName)) {
+      String s = "\"" + HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name.";
+      if (!ready) {
+        s += " Please rename it before upgrade.";
+      }
+      throw new IllegalNameException(s);
+    }
   }
 
   /**
    * Verify child's name for fs limit.
    * @throws PathComponentTooLongException child's name is too long.
    */
-  public void verifyMaxComponentLength(String childName,
-      Object parentPath, int pos) throws PathComponentTooLongException {
+  void verifyMaxComponentLength(byte[] childName, Object parentPath, int pos)
+      throws PathComponentTooLongException {
     if (maxComponentLength == 0) {
      return;
     }
 
-    final int length = childName.length();
+    final int length = childName.length;
     if (length > maxComponentLength) {
       final String p = parentPath instanceof INode[]?
           getFullPathName((INode[])parentPath, pos - 1): (String)parentPath;
       final PathComponentTooLongException e = new PathComponentTooLongException(
-          maxComponentLength, length, p, childName);
+          maxComponentLength, length, p, DFSUtil.bytes2String(childName));
       if (ready) {
         throw e;
       } else {
         // Do not throw if edits log is still being processed
-        NameNode.LOG.error("FSDirectory.verifyMaxComponentLength: "
-            + e.getLocalizedMessage());
+        NameNode.LOG.error("ERROR in FSDirectory.verifyINodeName", e);
       }
     }
   }
@@ -1967,7 +1978,7 @@ public class FSDirectory implements Closeable {
    * Verify children size for fs limit.
    * @throws MaxDirectoryItemsExceededException too many children.
    */
-  private void verifyMaxDirItems(INode[] pathComponents, int pos)
+  void verifyMaxDirItems(INode[] pathComponents, int pos)
       throws MaxDirectoryItemsExceededException {
     if (maxDirItems == 0) {
       return;
@@ -2015,8 +2026,11 @@ public class FSDirectory implements Closeable {
     // original location becase a quota violation would cause the the item
     // to go "poof". The fs limits must be bypassed for the same reason.
     if (checkQuota) {
-      verifyFsLimits(inodes, pos, child);
+      verifyMaxComponentLength(child.getLocalNameBytes(), inodes, pos);
+      verifyMaxDirItems(inodes, pos);
     }
+    // always verify inode name
+    verifyINodeName(child.getLocalNameBytes());
 
     final Quota.Counts counts = child.computeQuotaUsage();
     updateCount(iip, pos,
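The "Please rename it before upgrade" branch in verifyINodeName() matches the operational note in CHANGES.HDFS-2802.txt: any pre-existing ".snapshot" entries must be renamed with the old software before upgrading, or the upgrade will fail. A sketch of such a sweep, not part of the patch and with an arbitrary replacement name (".snapshot.renamed"):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RenameDotSnapshotBeforeUpgrade {
  public static void main(String[] args) throws IOException {
    // run against the pre-upgrade cluster, using the pre-upgrade client
    FileSystem fs = FileSystem.get(new Configuration());
    sweep(fs, new Path("/"));
  }

  private static void sweep(FileSystem fs, Path dir) throws IOException {
    for (FileStatus stat : fs.listStatus(dir)) {
      Path p = stat.getPath();
      if (".snapshot".equals(p.getName())) {
        Path target = new Path(p.getParent(), ".snapshot.renamed");
        System.out.println("renaming " + p + " to " + target);
        fs.rename(p, target);
        p = target;
      }
      if (stat.isDirectory()) {
        sweep(fs, p);   // recurse into the (possibly renamed) directory
      }
    }
  }
}
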
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index a8311d57d6..975ea57c07 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -565,6 +565,10 @@ public class FSImageFormat {
   INode loadINode(final byte[] localName, boolean isSnapshotINode,
       DataInput in) throws IOException {
     final int imgVersion = getLayoutVersion();
+    if (LayoutVersion.supports(Feature.SNAPSHOT, imgVersion)) {
+      namesystem.getFSDirectory().verifyINodeName(localName);
+    }
+
     long inodeId = LayoutVersion.supports(Feature.ADD_INODE_ID, imgVersion) ?
         in.readLong() : namesystem.allocateNewInodeId();
 
@@ -903,7 +907,7 @@ public class FSImageFormat {
    * actually leads to.
    * @return The snapshot path.
    */
-  private String computeSnapshotPath(String nonSnapshotPath,
+  private static String computeSnapshotPath(String nonSnapshotPath,
       Snapshot snapshot) {
     String snapshotParentFullPath = snapshot.getRoot().getParent()
         .getFullPathName();
@@ -911,10 +915,8 @@ public class FSImageFormat {
     String relativePath = nonSnapshotPath.equals(snapshotParentFullPath) ?
         Path.SEPARATOR : nonSnapshotPath.substring(
             snapshotParentFullPath.length());
-    String snapshotFullPath = snapshotParentFullPath + Path.SEPARATOR
-        + HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + snapshotName
-        + relativePath;
-    return snapshotFullPath;
+    return Snapshot.getSnapshotPath(snapshotParentFullPath,
+        snapshotName + relativePath);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index dd652ec94b..c2d24be548 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -5806,7 +5806,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       if (snapshotName == null || snapshotName.isEmpty()) {
         snapshotName = Snapshot.generateDefaultSnapshotName();
       }
-      dir.verifyMaxComponentLength(snapshotName, snapshotRoot, 0);
+      dir.verifySnapshotName(snapshotName, snapshotRoot);
       dir.writeLock();
       try {
         snapshotPath = snapshotManager.createSnapshot(snapshotRoot, snapshotName);
@@ -5844,7 +5844,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
             safeMode);
       }
       checkOwner(pc, path);
-      dir.verifyMaxComponentLength(snapshotNewName, path, 0);
+      dir.verifySnapshotName(snapshotNewName, path);
 
       snapshotManager.renameSnapshot(path, snapshotOldName, snapshotNewName);
       getEditLog().logRenameSnapshot(path, snapshotOldName, snapshotNewName);
@@ -5854,12 +5854,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     getEditLog().logSync();
 
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
-      Path oldSnapshotRoot = new Path(path, HdfsConstants.DOT_SNAPSHOT_DIR
-          + "/" + snapshotOldName);
-      Path newSnapshotRoot = new Path(path, HdfsConstants.DOT_SNAPSHOT_DIR
-          + "/" + snapshotNewName);
-      logAuditEvent(true, "renameSnapshot", oldSnapshotRoot.toString(),
-          newSnapshotRoot.toString(), null);
+      String oldSnapshotRoot = Snapshot.getSnapshotPath(path, snapshotOldName);
+      String newSnapshotRoot = Snapshot.getSnapshotPath(path, snapshotNewName);
+      logAuditEvent(true, "renameSnapshot", oldSnapshotRoot, newSnapshotRoot, null);
     }
   }
@@ -5959,9 +5956,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     getEditLog().logSync();
 
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
-      Path rootPath = new Path(snapshotRoot, HdfsConstants.DOT_SNAPSHOT_DIR
-          + Path.SEPARATOR + snapshotName);
-      logAuditEvent(true, "deleteSnapshot", rootPath.toString(), null, null);
+      String rootPath = Snapshot.getSnapshotPath(snapshotRoot, snapshotName);
+      logAuditEvent(true, "deleteSnapshot", rootPath, null, null);
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 6009b39e7c..58e5e664c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.FileNotFoundException;
 import java.io.PrintWriter;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
@@ -511,8 +512,8 @@ public class INodeDirectory extends INodeWithAdditionalFields {
    * @return true if path component is {@link HdfsConstants#DOT_SNAPSHOT_DIR}
    */
   private static boolean isDotSnapshotDir(byte[] pathComponent) {
-    return pathComponent == null ? false : HdfsConstants.DOT_SNAPSHOT_DIR
-        .equalsIgnoreCase(DFSUtil.bytes2String(pathComponent));
+    return pathComponent == null ? false
+        : Arrays.equals(HdfsConstants.DOT_SNAPSHOT_DIR_BYTES, pathComponent);
   }
 
   /**
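One behavioral detail of the INodeDirectory change: the old isDotSnapshotDir() matched the component case-insensitively via equalsIgnoreCase(), while the new check compares the raw bytes, so only the exact lower-case ".snapshot" matches. A small sketch (assuming this patch is applied, since it introduces DOT_SNAPSHOT_DIR_BYTES):

import java.util.Arrays;

import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class DotSnapshotMatchExample {
  public static void main(String[] args) {
    for (String name : new String[] { ".snapshot", ".SNAPSHOT" }) {
      byte[] component = DFSUtil.string2Bytes(name);
      boolean oldMatch = HdfsConstants.DOT_SNAPSHOT_DIR
          .equalsIgnoreCase(DFSUtil.bytes2String(component));
      boolean newMatch = Arrays.equals(
          HdfsConstants.DOT_SNAPSHOT_DIR_BYTES, component);
      // ".snapshot": oldMatch=true,  newMatch=true
      // ".SNAPSHOT": oldMatch=true,  newMatch=false
      System.out.println(name + ": old=" + oldMatch + ", new=" + newMatch);
    }
  }
}
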
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
index 07b05bf25a..7c194107a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
@@ -49,9 +49,16 @@ public class Snapshot implements Comparable<byte[]> {
     return new SimpleDateFormat(DEFAULT_SNAPSHOT_NAME_PATTERN).format(new Date());
   }
 
-  static String getSnapshotPath(String snapshottableDir, String snapshotName) {
-    return new Path(snapshottableDir, HdfsConstants.DOT_SNAPSHOT_DIR
-        + Path.SEPARATOR + snapshotName).toString();
+  public static String getSnapshotPath(String snapshottableDir,
+      String snapshotRelativePath) {
+    final StringBuilder b = new StringBuilder(snapshottableDir);
+    if (b.charAt(b.length() - 1) != Path.SEPARATOR_CHAR) {
+      b.append(Path.SEPARATOR);
+    }
+    return b.append(HdfsConstants.DOT_SNAPSHOT_DIR)
+        .append(Path.SEPARATOR)
+        .append(snapshotRelativePath)
+        .toString();
   }
 
   /**
@@ -123,9 +130,7 @@ public class Snapshot implements Comparable<byte[]> {
 
   @Override
   public String getFullPathName() {
-    return getParent().getFullPathName() + Path.SEPARATOR
-        + HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR
-        + this.getLocalName();
+    return getSnapshotPath(getParent().getFullPathName(), getLocalName());
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
index 4e576db793..4c1e3d654a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
@@ -123,7 +123,7 @@ class ImageLoaderCurrent implements ImageLoader {
       new SimpleDateFormat("yyyy-MM-dd HH:mm");
   private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
       -24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39,
-      -40, -41, -42};
+      -40, -41, -42, -43};
   private int imageVersion = 0;
 
   /* (non-Javadoc)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java
index a80e1d76eb..5a0b2038f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java
@@ -43,14 +43,18 @@ public class SnapshotDiff {
     if (Path.CUR_DIR.equals(name)) { // current directory
       return "";
     }
-    if (name.startsWith(HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR)
-        || name.startsWith(Path.SEPARATOR + HdfsConstants.DOT_SNAPSHOT_DIR
-            + Path.SEPARATOR)) {
-      // get the snapshot name
-      int i = name.indexOf(HdfsConstants.DOT_SNAPSHOT_DIR);
-      return name.substring(i + HdfsConstants.DOT_SNAPSHOT_DIR.length() + 1);
+    final int i;
+    if (name.startsWith(HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR)) {
+      i = 0;
+    } else if (name.startsWith(
+        HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR + Path.SEPARATOR)) {
+      i = 1;
+    } else {
+      return name;
     }
-    return name;
+
+    // get the snapshot name
+    return name.substring(i + HdfsConstants.DOT_SNAPSHOT_DIR.length() + 1);
   }
 
   public static void main(String[] argv) throws IOException {
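A self-contained sketch of the path handling above, mirroring the new Snapshot.getSnapshotPath() and the reworked SnapshotDiff name parsing in plain Java rather than calling the server-side classes (the sample inputs are assumptions):

public class SnapshotPathExample {
  static final String DOT_SNAPSHOT_DIR = ".snapshot";

  // mirrors Snapshot.getSnapshotPath(): avoids "//.snapshot" when the dir is "/"
  static String getSnapshotPath(String snapshottableDir, String snapshotRelativePath) {
    StringBuilder b = new StringBuilder(snapshottableDir);
    if (b.charAt(b.length() - 1) != '/') {
      b.append('/');
    }
    return b.append(DOT_SNAPSHOT_DIR).append('/').append(snapshotRelativePath).toString();
  }

  // mirrors SnapshotDiff's argument parsing
  static String getSnapshotName(String name) {
    final int i;
    if (name.startsWith(DOT_SNAPSHOT_DIR + "/")) {
      i = 0;
    } else if (name.startsWith("/" + DOT_SNAPSHOT_DIR + "/")) {
      i = 1;
    } else {
      return name;            // already a plain snapshot name
    }
    return name.substring(i + DOT_SNAPSHOT_DIR.length() + 1);
  }

  public static void main(String[] args) {
    System.out.println(getSnapshotPath("/foo/bar", "s1")); // /foo/bar/.snapshot/s1
    System.out.println(getSnapshotPath("/", "s1"));        // /.snapshot/s1
    System.out.println(getSnapshotName(".snapshot/s1"));   // s1
    System.out.println(getSnapshotName("/.snapshot/s1"));  // s1
  }
}
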
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
index 4ec479bd93..8adcbbde0c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
@@ -33,8 +33,10 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.FSLimitException.IllegalNameException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.junit.Before;
 import org.junit.Test;
@@ -104,6 +106,7 @@ public class TestFsLimits {
     addChildWithName("333", null);
     addChildWithName("4444", null);
     addChildWithName("55555", null);
+    addChildWithName(HdfsConstants.DOT_SNAPSHOT_DIR, IllegalNameException.class);
   }
 
   @Test
@@ -143,6 +146,7 @@ public class TestFsLimits {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, 2);
     fsIsReady = false;
 
+    addChildWithName(HdfsConstants.DOT_SNAPSHOT_DIR, IllegalNameException.class);
     addChildWithName("1", null);
     addChildWithName("22", null);
     addChildWithName("333", null);
@@ -159,7 +163,10 @@
 
     Class<?> generated = null;
     try {
-      fs.verifyFsLimits(inodes, 1, child);
+      fs.verifyMaxComponentLength(child.getLocalNameBytes(), inodes, 1);
+      fs.verifyMaxDirItems(inodes, 1);
+      fs.verifyINodeName(child.getLocalNameBytes());
+      rootInode.addChild(child);
     } catch (QuotaExceededException e) {
       generated = e.getClass();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
index dd3a2ca742..56e48ff1a4 100644
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored and b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored differ
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
index 2a285ab871..e3b0eb53ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
[editsStored.xml is regenerated to match the new binary editsStored: the edits version recorded in the file moves from -42 to -43 (the new SNAPSHOT layout version); the standalone OP_SET_GENSTAMP records that previously preceded each OP_ADD are gone, so subsequent transaction ids are renumbered and the log now ends at txid 59 instead of 65; and the timestamps, delegation-token issue/expiry times, generation stamps, block ids, client names, user name (jing becomes szetszwo) and test file name (/file_create becomes /file_create_u\0001;F431) are refreshed from the new test run. The XML element tags did not survive in this copy of the diff, so the per-record changes are summarized here rather than reproduced.]