diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 67c3e69213..ce7f0306be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -485,6 +485,8 @@ Release 2.5.0 - UNRELEASED
     HDFS-6399. Add note about setfacl in HDFS permissions guide. (cnauroth via
     wang)
 
+    HDFS-6315. Decouple recording edit logs from FSDirectory. (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index a21b7dba33..ee7044c463 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -50,7 +50,6 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -265,11 +264,6 @@ protected void setReady(boolean flag) {
     ready = flag;
   }
 
-  private void incrDeletedFileCount(long count) {
-    if (getFSNamesystem() != null)
-      NameNode.getNameNodeMetrics().incrFilesDeleted(count);
-  }
-
   /**
    * Shutdown the filestore
    */
@@ -436,65 +430,6 @@ BlockInfo addBlock(String path, INodesInPath inodesInPath, Block block,
     }
   }
 
-  /**
-   * Persist the block list for the inode.
-   */
-  void persistBlocks(String path, INodeFile file, boolean logRetryCache) {
-    Preconditions.checkArgument(file.isUnderConstruction());
-    waitForReady();
-
-    writeLock();
-    try {
-      fsImage.getEditLog().logUpdateBlocks(path, file, logRetryCache);
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug("DIR* FSDirectory.persistBlocks: "
-            +path+" with "+ file.getBlocks().length
-            +" blocks is persisted to the file system");
-      }
-    } finally {
-      writeUnlock();
-    }
-  }
-
-  /**
-   * Persist the new block (the last block of the given file).
-   */
-  void persistNewBlock(String path, INodeFile file) {
-    Preconditions.checkArgument(file.isUnderConstruction());
-    waitForReady();
-
-    writeLock();
-    try {
-      fsImage.getEditLog().logAddBlock(path, file);
-    } finally {
-      writeUnlock();
-    }
-    if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* FSDirectory.persistNewBlock: "
-          + path + " with new block " + file.getLastBlock().toString()
-          + ", current total block count is " + file.getBlocks().length);
-    }
-  }
-
-  /**
-   * Close file.
-   */
-  void closeFile(String path, INodeFile file) {
-    waitForReady();
-    writeLock();
-    try {
-      // file is closed
-      fsImage.getEditLog().logCloseFile(path, file);
-      if (NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug("DIR* FSDirectory.closeFile: "
-            +path+" with "+ file.getBlocks().length
-            +" blocks is persisted to the file system");
-      }
-    } finally {
-      writeUnlock();
-    }
-  }
-
   /**
    * Remove a block from the file.
    * @return Whether the block exists in the corresponding file
@@ -540,7 +475,7 @@ boolean unprotectedRemoveBlock(String path,
    * @deprecated Use {@link #renameTo(String, String, boolean, Rename...)}
    */
   @Deprecated
-  boolean renameTo(String src, String dst, boolean logRetryCache)
+  boolean renameTo(String src, String dst, long mtime)
     throws QuotaExceededException, UnresolvedLinkException,
     FileAlreadyExistsException, SnapshotAccessControlException, IOException {
     if (NameNode.stateChangeLog.isDebugEnabled()) {
@@ -548,22 +483,20 @@ boolean renameTo(String src, String dst, boolean logRetryCache)
           +src+" to "+dst);
     }
     waitForReady();
-    long now = now();
     writeLock();
     try {
-      if (!unprotectedRenameTo(src, dst, now))
+      if (!unprotectedRenameTo(src, dst, mtime))
         return false;
     } finally {
       writeUnlock();
     }
-    fsImage.getEditLog().logRename(src, dst, now, logRetryCache);
     return true;
   }
 
   /**
    * @see #unprotectedRenameTo(String, String, long, Options.Rename...)
    */
-  void renameTo(String src, String dst, boolean logRetryCache,
+  void renameTo(String src, String dst, long mtime,
       Options.Rename... options)
       throws FileAlreadyExistsException, FileNotFoundException,
       ParentNotDirectoryException, QuotaExceededException,
@@ -573,16 +506,14 @@ void renameTo(String src, String dst, boolean logRetryCache,
           + " to " + dst);
     }
     waitForReady();
-    long now = now();
     writeLock();
     try {
-      if (unprotectedRenameTo(src, dst, now, options)) {
-        incrDeletedFileCount(1);
+      if (unprotectedRenameTo(src, dst, mtime, options)) {
+        namesystem.incrDeletedFileCount(1);
       }
     } finally {
       writeUnlock();
     }
-    fsImage.getEditLog().logRename(src, dst, now, logRetryCache, options);
   }
 
   /**
@@ -1106,11 +1037,7 @@ Block[] setReplication(String src, short replication, short[] blockRepls)
     waitForReady();
     writeLock();
     try {
-      final Block[] fileBlocks = unprotectedSetReplication(
-          src, replication, blockRepls);
-      if (fileBlocks != null)  // log replication change
-        fsImage.getEditLog().logSetReplication(src, replication);
-      return fileBlocks;
+      return unprotectedSetReplication(src, replication, blockRepls);
     } finally {
       writeUnlock();
     }
@@ -1178,7 +1105,6 @@ void setPermission(String src, FsPermission permission)
     } finally {
       writeUnlock();
     }
-    fsImage.getEditLog().logSetPermissions(src, permission);
   }
 
   void unprotectedSetPermission(String src, FsPermission permissions)
@@ -1203,7 +1129,6 @@ void setOwner(String src, String username, String groupname)
     } finally {
       writeUnlock();
     }
-    fsImage.getEditLog().logSetOwner(src, username, groupname);
   }
 
   void unprotectedSetOwner(String src, String username, String groupname)
@@ -1226,18 +1151,14 @@ void unprotectedSetOwner(String src, String username, String groupname)
   /**
    * Concat all the blocks from srcs to trg and delete the srcs files
    */
-  void concat(String target, String [] srcs, boolean supportRetryCache)
+  void concat(String target, String[] srcs, long timestamp)
       throws UnresolvedLinkException, QuotaExceededException,
       SnapshotAccessControlException, SnapshotException {
     writeLock();
     try {
       // actual move
       waitForReady();
-      long timestamp = now();
       unprotectedConcat(target, srcs, timestamp);
-      // do the commit
-      fsImage.getEditLog().logConcat(target, srcs, timestamp,
-          supportRetryCache);
     } finally {
       writeUnlock();
     }
@@ -1312,17 +1233,14 @@ void unprotectedConcat(String target, String [] srcs, long timestamp)
    * @param src Path of a directory to delete
    * @param collectedBlocks Blocks under the deleted directory
    * @param removedINodes INodes that should be removed from {@link #inodeMap}
-   * @param logRetryCache Whether to record RPC IDs in editlog to support retry
-   *                      cache rebuilding.
-   * @return true on successful deletion; else false
+   * @return the number of files that have been removed
    */
-  boolean delete(String src, BlocksMapUpdateInfo collectedBlocks,
-      List<INode> removedINodes, boolean logRetryCache) throws IOException {
+  long delete(String src, BlocksMapUpdateInfo collectedBlocks,
+      List<INode> removedINodes, long mtime) throws IOException {
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + src);
     }
     waitForReady();
-    long now = now();
     final long filesRemoved;
     writeLock();
     try {
@@ -1335,20 +1253,13 @@ boolean delete(String src, BlocksMapUpdateInfo collectedBlocks,
             new ArrayList<INodeDirectorySnapshottable>();
         checkSnapshot(inodesInPath.getLastINode(), snapshottableDirs);
         filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
-            removedINodes, now);
+            removedINodes, mtime);
         namesystem.removeSnapshottableDirs(snapshottableDirs);
       }
     } finally {
       writeUnlock();
     }
-    if (filesRemoved < 0) {
-      return false;
-    }
-    fsImage.getEditLog().logDelete(src, now, logRetryCache);
-    incrDeletedFileCount(filesRemoved);
-    // Blocks/INodes will be handled later by the caller of this method
-    getFSNamesystem().removePathAndBlocks(src, null, null);
-    return true;
+    return filesRemoved;
   }
 
   private static boolean deleteAllowed(final INodesInPath iip,
@@ -2419,7 +2330,7 @@ int getInodeMapSize() {
   /**
    * See {@link ClientProtocol#setQuota(String, long, long)} for the contract.
    * Sets quota for for a directory.
-   * @return INodeDirectory if any of the quotas have changed. null other wise.
+   * @return INodeDirectory if any of the quotas have changed. null otherwise.
    * @throws FileNotFoundException if the path does not exist.
    * @throws PathIsNotDirectoryException if the path is not a directory.
    * @throws QuotaExceededException if the directory tree size is
@@ -2470,21 +2381,17 @@ INodeDirectory unprotectedSetQuota(String src, long nsQuota, long dsQuota)
 
   /**
    * See {@link ClientProtocol#setQuota(String, long, long)} for the contract.
+   * @return INodeDirectory if any of the quotas have changed. null otherwise.
    * @throws SnapshotAccessControlException if path is in RO snapshot
    * @see #unprotectedSetQuota(String, long, long)
    */
-  void setQuota(String src, long nsQuota, long dsQuota)
+  INodeDirectory setQuota(String src, long nsQuota, long dsQuota)
     throws FileNotFoundException, PathIsNotDirectoryException,
     QuotaExceededException, UnresolvedLinkException,
     SnapshotAccessControlException {
     writeLock();
     try {
-      INodeDirectory dir = unprotectedSetQuota(src, nsQuota, dsQuota);
-      if (dir != null) {
-        final Quota.Counts q = dir.getQuotaCounts();
-        fsImage.getEditLog().logSetQuota(src,
-            q.get(Quota.NAMESPACE), q.get(Quota.DISKSPACE));
-      }
+      return unprotectedSetQuota(src, nsQuota, dsQuota);
     } finally {
       writeUnlock();
     }
@@ -2503,18 +2410,14 @@ long totalInodes() {
 
   /**
    * Sets the access time on the file/directory. Logs it in the transaction log.
    */
-  void setTimes(String src, INode inode, long mtime, long atime, boolean force,
-      int latestSnapshotId) throws QuotaExceededException {
-    boolean status = false;
+  boolean setTimes(INode inode, long mtime, long atime, boolean force,
+      int latestSnapshotId) throws QuotaExceededException {
     writeLock();
     try {
-      status = unprotectedSetTimes(inode, mtime, atime, force, latestSnapshotId);
+      return unprotectedSetTimes(inode, mtime, atime, force, latestSnapshotId);
     } finally {
       writeUnlock();
     }
-    if (status) {
-      fsImage.getEditLog().logTimes(src, mtime, atime);
-    }
   }
 
   boolean unprotectedSetTimes(String src, long mtime, long atime, boolean force)
@@ -2730,11 +2633,10 @@ INodeSymlink unprotectedAddSymlink(long id, String path, String target,
     return addINode(path, symlink) ? symlink : null;
   }
 
-  void modifyAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
+  List<AclEntry> modifyAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
     writeLock();
     try {
-      List<AclEntry> newAcl = unprotectedModifyAclEntries(src, aclSpec);
-      fsImage.getEditLog().logSetAcl(src, newAcl);
+      return unprotectedModifyAclEntries(src, aclSpec);
     } finally {
       writeUnlock();
     }
@@ -2753,11 +2655,10 @@ private List<AclEntry> unprotectedModifyAclEntries(String src,
     return newAcl;
   }
 
-  void removeAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
+  List<AclEntry> removeAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
    writeLock();
     try {
-      List<AclEntry> newAcl = unprotectedRemoveAclEntries(src, aclSpec);
-      fsImage.getEditLog().logSetAcl(src, newAcl);
+      return unprotectedRemoveAclEntries(src, aclSpec);
     } finally {
       writeUnlock();
     }
@@ -2776,11 +2677,10 @@ private List<AclEntry> unprotectedRemoveAclEntries(String src,
     return newAcl;
   }
 
-  void removeDefaultAcl(String src) throws IOException {
+  List<AclEntry> removeDefaultAcl(String src) throws IOException {
     writeLock();
     try {
-      List<AclEntry> newAcl = unprotectedRemoveDefaultAcl(src);
-      fsImage.getEditLog().logSetAcl(src, newAcl);
+      return unprotectedRemoveDefaultAcl(src);
     } finally {
       writeUnlock();
     }
@@ -2803,7 +2703,6 @@ void removeAcl(String src) throws IOException {
     writeLock();
     try {
       unprotectedRemoveAcl(src);
-      fsImage.getEditLog().logSetAcl(src, AclFeature.EMPTY_ENTRY_LIST);
     } finally {
       writeUnlock();
     }
@@ -2817,11 +2716,10 @@ private void unprotectedRemoveAcl(String src) throws IOException {
     AclStorage.removeINodeAcl(inode, snapshotId);
   }
 
-  void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
+  List<AclEntry> setAcl(String src, List<AclEntry> aclSpec) throws IOException {
     writeLock();
     try {
-      List<AclEntry> newAcl = unprotectedSetAcl(src, aclSpec);
-      fsImage.getEditLog().logSetAcl(src, newAcl);
+      return unprotectedSetAcl(src, aclSpec);
     } finally {
       writeUnlock();
     }
@@ -2868,18 +2766,11 @@ AclStatus getAclStatus(String src) throws IOException {
       readUnlock();
     }
   }
-  
-  void removeXAttr(String src, XAttr xAttr) throws IOException {
+
+  XAttr removeXAttr(String src, XAttr xAttr) throws IOException {
     writeLock();
     try {
-      XAttr removedXAttr = unprotectedRemoveXAttr(src, xAttr);
-      if (removedXAttr != null) {
-        fsImage.getEditLog().logRemoveXAttr(src, removedXAttr);
-      } else {
-        NameNode.stateChangeLog.info("DIR* FSDirectory.removeXAttr: XAttr " +
-          XAttrHelper.getPrefixName(xAttr) +
-          " does not exist on the path " + src);
-      }
+      return unprotectedRemoveXAttr(src, xAttr);
     } finally {
       writeUnlock();
     }
@@ -2917,12 +2808,11 @@ List<XAttr> filterINodeXAttr(List<XAttr> existingXAttrs,
     return xAttrs;
   }
 
-  void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
-      boolean logRetryCache) throws IOException {
+  void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
+      throws IOException {
     writeLock();
     try {
       unprotectedSetXAttr(src, xAttr, flag);
-      fsImage.getEditLog().logSetXAttr(src, xAttr, logRetryCache);
     } finally {
       writeUnlock();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index d7bbf766a6..1d1d660dc7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -145,7 +145,6 @@
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.StorageType;
-import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@@ -1567,6 +1566,7 @@ private void setPermissionInt(String src, FsPermission permission)
       src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOwner(pc, src);
       dir.setPermission(src, permission);
+      getEditLog().logSetPermissions(src, permission);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
     }
@@ -1612,6 +1612,7 @@ private void setOwnerInt(String src, String username, String group)
         }
       }
       dir.setOwner(src, username, group);
+      getEditLog().logSetOwner(src, username, group);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
     }
@@ -1742,7 +1743,11 @@ && doAccessTime && isAccessTimeSupported()) {
           if (isReadOp) {
             continue;
           }
-          dir.setTimes(src, inode, -1, now, false, iip.getLatestSnapshotId());
+          boolean changed = dir.setTimes(inode, -1, now, false,
+              iip.getLatestSnapshotId());
+          if (changed) {
+            getEditLog().logTimes(src, -1, now);
+          }
         }
       }
       final long fileSize = iip.isSnapshot() ?
@@ -1953,7 +1958,9 @@ private void concatInternal(FSPermissionChecker pc, String target,
           Arrays.toString(srcs) + " to " + target);
     }
 
-    dir.concat(target,srcs, logRetryCache);
+    long timestamp = now();
+    dir.concat(target, srcs, timestamp);
+    getEditLog().logConcat(target, srcs, timestamp, logRetryCache);
   }
 
   /**
@@ -1994,7 +2001,11 @@ private void setTimesInt(String src, long mtime, long atime)
       final INodesInPath iip = dir.getINodesInPath4Write(src);
       final INode inode = iip.getLastINode();
       if (inode != null) {
-        dir.setTimes(src, inode, mtime, atime, true, iip.getLatestSnapshotId());
+        boolean changed = dir.setTimes(inode, mtime, atime, true,
+            iip.getLatestSnapshotId());
+        if (changed) {
+          getEditLog().logTimes(src, mtime, atime);
+        }
         resultingStat = getAuditFileInfo(src, false);
       } else {
         throw new FileNotFoundException("File/Directory " + src + " does not exist.");
@@ -2115,6 +2126,7 @@ private boolean setReplicationInt(String src, final short replication)
       final Block[] blocks = dir.setReplication(src, replication, blockRepls);
       isFile = blocks != null;
       if (isFile) {
+        getEditLog().logSetReplication(src, replication);
         blockManager.setReplication(blockRepls[0], blockRepls[1], src, blocks);
       }
     } finally {
@@ -2740,7 +2752,7 @@ LocatedBlock getAdditionalBlock(String src, long fileId, String clientName,
       INodesInPath inodesInPath = INodesInPath.fromINode(pendingFile);
       saveAllocatedBlock(src, inodesInPath, newBlock, targets);
 
-      dir.persistNewBlock(src, pendingFile);
+      persistNewBlock(src, pendingFile);
       offset = pendingFile.computeFileSize();
     } finally {
       writeUnlock();
     }
@@ -2960,7 +2972,7 @@ boolean abandonBlock(ExtendedBlock b, long fileId, String src, String holder)
         NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: " + b
             + " is removed from pendingCreates");
       }
-      dir.persistBlocks(src, file, false);
+      persistBlocks(src, file, false);
     } finally {
       writeUnlock();
     }
@@ -3260,7 +3272,9 @@ private boolean renameToInternal(FSPermissionChecker pc, String src,
           false, false);
     }
 
-    if (dir.renameTo(src, dst, logRetryCache)) {
+    long mtime = now();
+    if (dir.renameTo(src, dst, mtime)) {
+      getEditLog().logRename(src, dst, mtime, logRetryCache);
       return true;
     }
     return false;
   }
@@ -3325,7 +3339,9 @@ private void renameToInternal(FSPermissionChecker pc, String src, String dst,
           false);
     }
 
-    dir.renameTo(src, dst, logRetryCache, options);
+    long mtime = now();
+    dir.renameTo(src, dst, mtime, options);
+    getEditLog().logRename(src, dst, mtime, logRetryCache, options);
   }
 
   /**
@@ -3408,10 +3424,17 @@ private boolean deleteInternal(String src, boolean recursive,
         checkPermission(pc, src, false, null, FsAction.WRITE, null,
             FsAction.ALL, true, false);
       }
+      long mtime = now();
       // Unlink the target directory from directory tree
-      if (!dir.delete(src, collectedBlocks, removedINodes, logRetryCache)) {
+      long filesRemoved = dir.delete(src, collectedBlocks, removedINodes,
+          mtime);
+      if (filesRemoved < 0) {
        return false;
      }
+      getEditLog().logDelete(src, mtime, logRetryCache);
+      incrDeletedFileCount(filesRemoved);
+      // Blocks/INodes will be handled later
+      removePathAndBlocks(src, null, null);
       ret = true;
     } finally {
       writeUnlock();
     }
@@ -3721,7 +3744,7 @@ ContentSummary getContentSummary(String src) throws IOException {
    *
    * Note: This does not support ".inodes" relative path.
    */
-  void setQuota(String path, long nsQuota, long dsQuota) 
+  void setQuota(String path, long nsQuota, long dsQuota)
       throws IOException, UnresolvedLinkException {
     checkSuperuserPrivilege();
     checkOperation(OperationCategory.WRITE);
@@ -3729,7 +3752,12 @@ void setQuota(String path, long nsQuota, long dsQuota)
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set quota on " + path);
-      dir.setQuota(path, nsQuota, dsQuota);
+      INodeDirectory changed = dir.setQuota(path, nsQuota, dsQuota);
+      if (changed != null) {
+        final Quota.Counts q = changed.getQuotaCounts();
+        getEditLog().logSetQuota(path,
+            q.get(Quota.NAMESPACE), q.get(Quota.DISKSPACE));
+      }
     } finally {
       writeUnlock();
     }
@@ -3770,7 +3798,7 @@ void fsync(String src, long fileId, String clientName, long lastBlockLength)
         pendingFile.getFileUnderConstructionFeature().updateLengthOfLastBlock(
             pendingFile, lastBlockLength);
       }
-      dir.persistBlocks(src, pendingFile, false);
+      persistBlocks(src, pendingFile, false);
     } finally {
       writeUnlock();
     }
@@ -3963,7 +3991,7 @@ private void finalizeINodeFileUnderConstruction(String src,
     final INodeFile newFile = pendingFile.toCompleteFile(now());
 
     // close file and persist block allocations for this file
-    dir.closeFile(src, newFile);
+    closeFile(src, newFile);
 
     blockManager.checkReplication(newFile);
   }
@@ -4114,7 +4142,8 @@ void commitBlockSynchronization(ExtendedBlock lastblock,
         src = closeFileCommitBlocks(iFile, storedBlock);
       } else {
         // If this commit does not want to close the file, persist blocks
-        src = persistBlocks(iFile, false);
+        src = iFile.getFullPathName();
+        persistBlocks(src, iFile, false);
       }
     } finally {
       writeUnlock();
     }
@@ -4152,21 +4181,6 @@ String closeFileCommitBlocks(INodeFile pendingFile, BlockInfo storedBlock)
     return src;
   }
 
-  /**
-   * Persist the block list for the given file.
-   *
-   * @param pendingFile
-   * @return Path to the given file.
-   * @throws IOException
-   */
-  @VisibleForTesting
-  String persistBlocks(INodeFile pendingFile, boolean logRetryCache)
-      throws IOException {
-    String src = pendingFile.getFullPathName();
-    dir.persistBlocks(src, pendingFile, logRetryCache);
-    return src;
-  }
-
   /**
    * Renew the lease(s) held by the given client
    */
@@ -4350,6 +4364,45 @@ void checkAvailableResources() {
     hasResourcesAvailable = nnResourceChecker.hasAvailableDiskSpace();
   }
 
+  /**
+   * Persist the block list for the inode.
+   * @param path
+   * @param file
+   * @param logRetryCache
+   */
+  private void persistBlocks(String path, INodeFile file,
+      boolean logRetryCache) {
+    assert hasWriteLock();
+    Preconditions.checkArgument(file.isUnderConstruction());
+    getEditLog().logUpdateBlocks(path, file, logRetryCache);
+    if(NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("persistBlocks: " + path
+          + " with " + file.getBlocks().length + " blocks is persisted to"
+          + " the file system");
+    }
+  }
+
+  void incrDeletedFileCount(long count) {
+    NameNode.getNameNodeMetrics().incrFilesDeleted(count);
+  }
+
+  /**
+   * Close file.
+   * @param path
+   * @param file
+   */
+  private void closeFile(String path, INodeFile file) {
+    assert hasWriteLock();
+    dir.waitForReady();
+    // file is closed
+    getEditLog().logCloseFile(path, file);
+    if (NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("closeFile: "
+          +path+" with "+ file.getBlocks().length
+          +" blocks is persisted to the file system");
+    }
+  }
+
   /**
    * Periodically calls hasAvailableResources of NameNodeResourceChecker, and if
    * there are found to be insufficient resources available, causes the NN to
@@ -4682,6 +4735,21 @@ void setBalancerBandwidth(long bandwidth) throws IOException {
     getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
   }
 
+  /**
+   * Persist the new block (the last block of the given file).
+   * @param path
+   * @param file
+   */
+  private void persistNewBlock(String path, INodeFile file) {
+    Preconditions.checkArgument(file.isUnderConstruction());
+    getEditLog().logAddBlock(path, file);
+    if (NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("persistNewBlock: "
+          + path + " with new block " + file.getLastBlock().toString()
+          + ", current total block count is " + file.getBlocks().length);
+    }
+  }
+
   /**
    * SafeModeInfo contains information related to the safe mode.
   * <p>
@@ -6090,7 +6158,7 @@ private void updatePipelineInternal(String clientName, ExtendedBlock oldBlock,
     blockinfo.setExpectedLocations(storages);
 
     String src = pendingFile.getFullPathName();
-    dir.persistBlocks(src, pendingFile, logRetryCache);
+    persistBlocks(src, pendingFile, logRetryCache);
   }
 
   // rename was successful. If any part of the renamed subtree had
@@ -7718,7 +7786,8 @@ void modifyAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
       checkNameNodeSafeMode("Cannot modify ACL entries on " + src);
       src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOwner(pc, src);
-      dir.modifyAclEntries(src, aclSpec);
+      List<AclEntry> newAcl = dir.modifyAclEntries(src, aclSpec);
+      getEditLog().logSetAcl(src, newAcl);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
     }
@@ -7739,7 +7808,8 @@ void removeAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
       checkNameNodeSafeMode("Cannot remove ACL entries on " + src);
       src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOwner(pc, src);
-      dir.removeAclEntries(src, aclSpec);
+      List<AclEntry> newAcl = dir.removeAclEntries(src, aclSpec);
+      getEditLog().logSetAcl(src, newAcl);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
     }
@@ -7760,7 +7830,8 @@ void removeDefaultAcl(String src) throws IOException {
       checkNameNodeSafeMode("Cannot remove default ACL entries on " + src);
       src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOwner(pc, src);
-      dir.removeDefaultAcl(src);
+      List<AclEntry> newAcl = dir.removeDefaultAcl(src);
+      getEditLog().logSetAcl(src, newAcl);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
     }
@@ -7782,6 +7853,7 @@ void removeAcl(String src) throws IOException {
       src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOwner(pc, src);
       dir.removeAcl(src);
+      getEditLog().logSetAcl(src, AclFeature.EMPTY_ENTRY_LIST);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
     }
@@ -7802,7 +7874,8 @@ void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
       checkNameNodeSafeMode("Cannot set ACL on " + src);
       src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOwner(pc, src);
-      dir.setAcl(src, aclSpec);
+      List<AclEntry> newAcl = dir.setAcl(src, aclSpec);
+      getEditLog().logSetAcl(src, newAcl);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
     }
@@ -7878,7 +7951,8 @@ private void setXAttrInt(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
         checkOwner(pc, src);
         checkPathAccess(pc, src, FsAction.WRITE);
       }
-      dir.setXAttr(src, xAttr, flag, logRetryCache);
+      dir.setXAttr(src, xAttr, flag);
+      getEditLog().logSetXAttr(src, xAttr, logRetryCache);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
     }
@@ -7999,7 +8073,10 @@ void removeXAttr(String src, XAttr xAttr) throws IOException {
         checkPathAccess(pc, src, FsAction.WRITE);
       }
 
-      dir.removeXAttr(src, xAttr);
+      XAttr removedXAttr = dir.removeXAttr(src, xAttr);
+      if (removedXAttr != null) {
+        getEditLog().logRemoveXAttr(src, removedXAttr);
+      }
       resultingStat = getAuditFileInfo(src, false);
     } catch (AccessControlException e) {
       logAuditEvent(false, "removeXAttr", src);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
index 29336294f7..dac8c0fc36 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
@@ -62,8 +62,6 @@ private FSNamesystem makeNameSystemSpy(Block block, INodeFile file)
     doReturn(blockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
     doReturn("").when(namesystemSpy).closeFileCommitBlocks(
         any(INodeFile.class), any(BlockInfo.class));
-    doReturn("").when(namesystemSpy).persistBlocks(
-        any(INodeFile.class), anyBoolean());
     doReturn(mock(FSEditLog.class)).when(namesystemSpy).getEditLog();
 
     return namesystemSpy;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
index 8cf68a7a36..94112cfbf9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
@@ -209,7 +209,7 @@ private void rename(String src, String dst, Class<?> expected)
     lazyInitFSDirectory();
     Class<?> generated = null;
     try {
-      fs.renameTo(src, dst, false, new Rename[] { });
+      fs.renameTo(src, dst, now(), new Rename[] { });
     } catch (Throwable e) {
       generated = e.getClass();
     }
@@ -222,7 +222,7 @@ private void deprecatedRename(String src, String dst, Class<?> expected)
     lazyInitFSDirectory();
     Class<?> generated = null;
     try {
-      fs.renameTo(src, dst, false);
+      fs.renameTo(src, dst, now());
     } catch (Throwable e) {
       generated = e.getClass();
     }
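
Taken together, the patch applies one pattern throughout: every fsImage.getEditLog().logXxx(...) call is lifted out of FSDirectory, whose methods now only mutate the namespace under the directory lock and return what changed (a removed-file count, the resulting ACL list, the modified INodeDirectory, or a boolean), while FSNamesystem inspects that return value and records the matching edit-log operation. The sketch below is a minimal, self-contained illustration of that call shape only; EditLog, Directory, and Namesystem are simplified stand-ins, not the real FSEditLog, FSDirectory, and FSNamesystem classes.

// Minimal sketch of the post-patch division of labor: the directory layer
// mutates and reports, the namesystem layer decides what to log.
import java.util.ArrayList;
import java.util.List;

class EditLog {
  final List<String> ops = new ArrayList<>();
  void logSetQuota(String path, long nsQuota, long dsQuota) {
    ops.add("OP_SET_QUOTA " + path + " " + nsQuota + " " + dsQuota);
  }
}

class Directory {
  // Returns the changed entry (here just the path) or null when nothing
  // changed; it no longer touches the edit log itself.
  String setQuota(String path, long nsQuota, long dsQuota) {
    boolean changed = true;  // placeholder for the real quota update
    return changed ? path : null;
  }
}

class Namesystem {
  private final Directory dir = new Directory();
  private final EditLog editLog = new EditLog();

  void setQuota(String path, long nsQuota, long dsQuota) {
    // The caller owns the decision to log, mirroring FSNamesystem.setQuota
    // after this patch: mutate first, then log only if something changed.
    String changed = dir.setQuota(path, nsQuota, dsQuota);
    if (changed != null) {
      editLog.logSetQuota(changed, nsQuota, dsQuota);
    }
  }

  public static void main(String[] args) {
    Namesystem ns = new Namesystem();
    ns.setQuota("/user/data", 1000, -1);
    System.out.println(ns.editLog.ops);  // [OP_SET_QUOTA /user/data 1000 -1]
  }
}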