From 3ac07b720b7839a7fe6c83f4ccfe319b6a892501 Mon Sep 17 00:00:00 2001
From: Uma Maheswara Rao Gangumalla
Date: Sat, 11 Aug 2018 23:22:59 -0700
Subject: [PATCH] HDFS-13808: [SPS]: Remove unwanted FSNamesystem
 #isFileOpenedForWrite() and #getFileInfo() functions. Contributed by Rakesh R.

---
 .../router/RouterNamenodeProtocol.java         |  1 +
 .../server/blockmanagement/BlockManager.java   | 34 -----------
 .../blockmanagement/DatanodeDescriptor.java    |  2 +-
 .../blockmanagement/DatanodeManager.java       | 17 ------
 .../hdfs/server/datanode/BPServiceActor.java   | 16 ------
 .../hdfs/server/namenode/FSNamesystem.java     | 38 -------------
 .../hdfs/server/namenode/Namesystem.java       | 22 -------
 .../sps/BlockStorageMovementNeeded.java        | 18 +-----
 .../hdfs/server/namenode/sps/Context.java      | 28 ---------
 .../hdfs/server/namenode/sps/SPSService.java   |  5 +-
 .../namenode/sps/StoragePolicySatisfier.java   | 19 +++----
 .../hdfs/server/sps/ExternalSPSContext.java    | 57 ++++---------------
 .../sps/ExternalStoragePolicySatisfier.java    |  2 +-
 .../src/site/markdown/ArchivalStorage.md       |  2 +-
 .../TestPersistentStoragePolicySatisfier.java  | 10 +++-
 ...StoragePolicySatisfierWithStripedFile.java  |  2 +-
 .../TestExternalStoragePolicySatisfier.java    |  4 +-
 ...TestStoragePolicySatisfyAdminCommands.java  |  2 +-
 18 files changed, 39 insertions(+), 240 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
index edfb3915fd..bf0db6e725 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
@@ -187,6 +187,7 @@ public boolean isRollingUpgrade() throws IOException {
 
   @Override
   public Long getNextSPSPath() throws IOException {
+    rpcServer.checkOperation(OperationCategory.READ, false);
     // not supported
     return null;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 87bd1554ed..d8a3aa37e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4299,21 +4299,6 @@ private void updateNeededReconstructions(final BlockInfo block,
     }
   }
 
-  /**
-   * Check file has low redundancy blocks.
-   */
-  public boolean hasLowRedundancyBlocks(BlockCollection bc) {
-    boolean result = false;
-    for (BlockInfo block : bc.getBlocks()) {
-      short expected = getExpectedRedundancyNum(block);
-      final NumberReplicas n = countNodes(block);
-      if (expected > n.liveReplicas()) {
-        result = true;
-      }
-    }
-    return result;
-  }
-
   /**
    * Check sufficient redundancy of the blocks in the collection. If any block
    * is needed reconstruction, insert it into the reconstruction queue.
@@ -5010,25 +4995,6 @@ public ProvidedStorageMap getProvidedStorageMap() {
     return providedStorageMap;
   }
 
-  /**
-   * Check whether file id has low redundancy blocks.
-   *
-   * @param inodeID
-   *          - inode id
-   */
-  public boolean hasLowRedundancyBlocks(long inodeID) {
-    namesystem.readLock();
-    try {
-      BlockCollection bc = namesystem.getBlockCollection(inodeID);
-      if (bc == null) {
-        return false;
-      }
-      return hasLowRedundancyBlocks(bc);
-    } finally {
-      namesystem.readUnlock();
-    }
-  }
-
   /**
    * Create SPS manager instance. It manages the user invoked sps paths and does
    * the movement.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 9c96f16a4f..12b5c33f6c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -793,7 +793,7 @@ public int getBlocksScheduled() {
   }
 
   /** Increment the number of blocks scheduled. */
-  public void incrementBlocksScheduled(StorageType t) {
+  void incrementBlocksScheduled(StorageType t) {
     currApproxBlocksScheduled.add(t, 1);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 4173f481bb..7d5d73c9a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1988,22 +1988,5 @@ public DatanodeStorageReport[] getDatanodeStorageReport(
     }
     return reports;
   }
-
-  public boolean verifyTargetDatanodeHasSpaceForScheduling(DatanodeInfo dn,
-      StorageType type, long estimatedSize) {
-    namesystem.readLock();
-    try {
-      DatanodeDescriptor datanode =
-          blockManager.getDatanodeManager().getDatanode(dn.getDatanodeUuid());
-      if (datanode == null) {
-        LOG.debug("Target datanode: " + dn + " doesn't exists");
-        return false;
-      }
-      return null != datanode.chooseStorage4Block(type, estimatedSize);
-    } finally {
-      namesystem.readUnlock();
-    }
-  }
-
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index dab8ae9bbb..f09ff6663b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -39,7 +38,6 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.client.BlockReportOptions;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -51,7 +50,6 @@
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
-import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMoveAttemptFinished;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
@@ -534,20 +532,6 @@ HeartbeatResponse sendHeartBeat(boolean requestBlockReportLease)
     return response;
   }
 
-  private BlocksStorageMoveAttemptFinished getStorageMoveAttemptFinishedBlocks(
-      List<Block> finishedBlks) {
-
-    if (finishedBlks.isEmpty()) {
-      return null;
-    }
-
-    // Create BlocksStorageMoveAttemptFinished with currently finished
-    // blocks
-    Block[] blockList = new Block[finishedBlks.size()];
-    finishedBlks.toArray(blockList);
-    return new BlocksStorageMoveAttemptFinished(blockList);
-  }
-
   @VisibleForTesting
   void sendLifelineForTests() throws IOException {
     lifelineSender.sendLifeline();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 7bc9ecccde..ecf7fce28b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3133,29 +3133,6 @@ void removeLeasesAndINodes(List<Long> removedUCFiles,
     }
   }
 
-  /**
-   * Get the file info for a specific file.
-   *
-   * @param src The string representation of the path to the file
-   * @param resolveLink whether to throw UnresolvedLinkException
-   *        if src refers to a symlink
-   * @param needLocation if blockLocations need to be returned
-   *
-   * @throws AccessControlException
-   *           if access is denied
-   * @throws UnresolvedLinkException
-   *           if a symlink is encountered.
-   *
-   * @return object containing information regarding the file or null if file
-   *         not found
-   * @throws StandbyException
-   */
-  @Override
-  public HdfsFileStatus getFileInfo(final String src, boolean resolveLink,
-      boolean needLocation) throws IOException {
-    return getFileInfo(src, resolveLink, needLocation, false);
-  }
-
   /**
    * Get the file info for a specific file.
    *
@@ -3603,21 +3580,6 @@ BlockInfo getStoredBlock(Block block) {
     return blockManager.getStoredBlock(block);
   }
 
-  @Override
-  public boolean isFileOpenedForWrite(String path) {
-    readLock();
-    try {
-      INode inode = dir.getINode(path, FSDirectory.DirOp.READ);
-      INodeFile iNodeFile = INodeFile.valueOf(inode, path);
-      LeaseManager.Lease lease = leaseManager.getLease(iNodeFile);
-      return lease != null;
-    } catch (IOException e) {
-      return false;
-    } finally {
-      readUnlock();
-    }
-  }
-
   @Override
   public boolean isInSnapshot(long blockCollectionID) {
     assert hasReadLock();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
index 82af4d234b..2a52587006 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
@@ -20,7 +20,6 @@
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.util.RwLock;
@@ -49,13 +48,6 @@ public interface Namesystem extends RwLock, SafeMode {
    */
   boolean inTransitionToActive();
 
-  /**
-   * Check if file is been opened for write purpose.
-   * @param filePath
-   * @return true if valid write lease exists, otherwise return false.
-   */
-  boolean isFileOpenedForWrite(String filePath);
-
   /**
    * Remove xAttr from the inode.
    * @param id
@@ -63,18 +55,4 @@ public interface Namesystem extends RwLock, SafeMode {
    * @throws IOException
    */
   void removeXattr(long id, String xattrName) throws IOException;
-
-  /**
-   * Gets the fileInfo of the given file path.
-   *
-   * @param filePath string representation of the path to the file
-   * @param resolveLink whether to throw UnresolvedLinkException
-   *        if src refers to a symlink
-   * @param needLocation if blockLocations need to be returned
-   *
-   * @return hdfs file status details
-   * @throws IOException
-   */
-  HdfsFileStatus getFileInfo(String filePath, boolean resolveLink,
-      boolean needLocation) throws IOException;
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
index b990bc5a0d..02b9cffe4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
@@ -19,7 +19,6 @@
 
 import java.io.IOException;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -79,7 +78,9 @@ public BlockStorageMovementNeeded(Context context) {
    *          - track info for satisfy the policy
    */
   public synchronized void add(ItemInfo trackInfo) {
-    storageMovementNeeded.add(trackInfo);
+    if (trackInfo != null) {
+      storageMovementNeeded.add(trackInfo);
+    }
   }
 
   /**
@@ -153,7 +154,6 @@ public synchronized int size() {
   }
 
   public synchronized void clearAll() {
-    ctxt.removeAllSPSPathIds();
     storageMovementNeeded.clear();
     pendingWorkForDirectory.clear();
   }
@@ -188,18 +188,6 @@ public synchronized void removeItemTrackInfo(ItemInfo trackInfo,
     }
   }
 
-  public synchronized void clearQueue(long trackId) {
-    ctxt.removeSPSPathId(trackId);
-    Iterator<ItemInfo> iterator = storageMovementNeeded.iterator();
-    while (iterator.hasNext()) {
-      ItemInfo next = iterator.next();
-      if (next.getFile() == trackId) {
-        iterator.remove();
-      }
-    }
-    pendingWorkForDirectory.remove(trackId);
-  }
-
   /**
    * Clean all the movements in spsDirsToBeTraveresed/storageMovementNeeded
    * and notify to clean up required resources.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/Context.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/Context.java
index afa5a501f7..b27294c27f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/Context.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/Context.java
@@ -22,8 +22,6 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.ParentNotDirectoryException;
-import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -31,7 +29,6 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
 import org.apache.hadoop.net.NetworkTopology;
-import org.apache.hadoop.security.AccessControlException;
 
 /**
  * An interface for the communication between SPS and Namenode module.
@@ -50,21 +47,6 @@ public interface Context {
    */
   boolean isInSafeMode();
 
-  /**
-   * Returns true if Mover tool is already running, false otherwise.
-   */
-  boolean isMoverRunning();
-
-  /**
-   * Gets the Inode ID number for the given path.
-   *
-   * @param path
-   *          - file/dir path
-   * @return Inode id number
-   */
-  long getFileID(String path) throws UnresolvedLinkException,
-      AccessControlException, ParentNotDirectoryException;
-
   /**
    * Gets the network topology.
    *
@@ -131,16 +113,6 @@ DatanodeStorageReport[] getLiveDatanodeStorageReport()
    */
   Long getNextSPSPath();
 
-  /**
-   * Removes the SPS path id.
-   */
-  void removeSPSPathId(long pathId);
-
-  /**
-   * Removes all SPS path ids.
-   */
-  void removeAllSPSPathIds();
-
   /**
    * Do scan and collects the files under that directory and adds to the given
    * BlockStorageMovementNeeded.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/SPSService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/SPSService.java
index a62dd93534..a83d32a347 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/SPSService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/SPSService.java
@@ -47,12 +47,9 @@ public interface SPSService {
    * Starts the SPS service. Make sure to initialize the helper services before
    * invoking this method.
    *
-   * @param reconfigStart
-   *          - to indicate whether the SPS startup requested from
-   *          reconfiguration service
    * @param spsMode sps service mode
    */
-  void start(boolean reconfigStart, StoragePolicySatisfierMode spsMode);
+  void start(StoragePolicySatisfierMode spsMode);
 
   /**
    * Stops the SPS service gracefully. Timed wait to stop storage policy
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfier.java
index 7ebd23d752..4c04b46678 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfier.java
@@ -150,21 +150,14 @@ public void init(final Context context) {
    * movements monitor for retry the attempts if needed.
    */
   @Override
-  public synchronized void start(boolean reconfigStart,
-      StoragePolicySatisfierMode serviceMode) {
+  public synchronized void start(StoragePolicySatisfierMode serviceMode) {
     if (serviceMode == StoragePolicySatisfierMode.NONE) {
       LOG.error("Can't start StoragePolicySatisfier for the given mode:{}",
           serviceMode);
       return;
     }
-    if (reconfigStart) {
-      LOG.info("Starting {} StoragePolicySatisfier, as admin requested to "
-          + "start it.", StringUtils.toLowerCase(serviceMode.toString()));
-    } else {
-      LOG.info("Starting {} StoragePolicySatisfier.",
-          StringUtils.toLowerCase(serviceMode.toString()));
-    }
-
+    LOG.info("Starting {} StoragePolicySatisfier.",
+        StringUtils.toLowerCase(serviceMode.toString()));
     isRunning = true;
     storagePolicySatisfierThread = new Daemon(this);
     storagePolicySatisfierThread.setName("StoragePolicySatisfier");
@@ -229,8 +222,8 @@ public void run() {
         }
         continue;
       }
+      ItemInfo itemInfo = null;
       try {
-        ItemInfo itemInfo = null;
         boolean retryItem = false;
         if (!ctxt.isInSafeMode()) {
           itemInfo = storageMovementNeeded.get();
@@ -324,12 +317,14 @@ public void run() {
           blockCount = 0L;
         }
         if (retryItem) {
-          // itemInfo.increRetryCount();
           this.storageMovementNeeded.add(itemInfo);
         }
       } catch (IOException e) {
         LOG.error("Exception during StoragePolicySatisfier execution - "
             + "will continue next cycle", e);
+        // Since it could not finish this item in previous iteration due to IOE,
+        // just try again.
+        this.storageMovementNeeded.add(itemInfo);
       } catch (Throwable t) {
         synchronized (this) {
           if (isRunning) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSContext.java
index 329303529e..8427e93a70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSContext.java
@@ -24,10 +24,7 @@
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
@@ -47,7 +44,6 @@
 import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.net.NetworkTopology;
-import org.apache.hadoop.security.AccessControlException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -92,35 +88,6 @@ public boolean isInSafeMode() {
     }
   }
 
-  @Override
-  public boolean isMoverRunning() {
-    try {
-      FSDataOutputStream out = nnc.getDistributedFileSystem()
-          .append(HdfsServerConstants.MOVER_ID_PATH);
-      out.close();
-      return false;
-    } catch (IOException ioe) {
-      LOG.warn("Exception while checking mover is running..", ioe);
-      return true;
-    }
-
-  }
-
-  @Override
-  public long getFileID(String path) throws UnresolvedLinkException,
-      AccessControlException, ParentNotDirectoryException {
-    HdfsFileStatus fs = null;
-    try {
-      fs = (HdfsFileStatus) nnc.getDistributedFileSystem().getFileStatus(
-          new Path(path));
-      LOG.info("Fetched the fileID:{} for the path:{}", fs.getFileId(), path);
-    } catch (IllegalArgumentException | IOException e) {
-      LOG.warn("Exception while getting file is for the given path:{}.", path,
-          e);
-    }
-    return fs != null ? fs.getFileId() : 0;
-  }
-
   @Override
   public NetworkTopology getNetworkTopology(DatanodeMap datanodeMap) {
     // create network topology.
@@ -152,8 +119,18 @@ public BlockStoragePolicy getStoragePolicy(byte policyId) {
 
   @Override
   public void removeSPSHint(long inodeId) throws IOException {
     Path filePath = DFSUtilClient.makePathFromFileId(inodeId);
-    nnc.getDistributedFileSystem().removeXAttr(filePath,
-        HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY);
+    try {
+      nnc.getDistributedFileSystem().removeXAttr(filePath,
+          HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY);
+    } catch (IOException e) {
+      List<String> listXAttrs = nnc.getDistributedFileSystem()
+          .listXAttrs(filePath);
+      if (!listXAttrs
+          .contains(HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY)) {
+        LOG.info("SPS hint already removed for the inodeId:{}."
+            + " Ignoring exception:{}", inodeId, e.getMessage());
+      }
+    }
   }
 
   @Override
@@ -196,16 +173,6 @@ public Long getNextSPSPath() {
     }
   }
 
-  @Override
-  public void removeSPSPathId(long pathId) {
-    // We need not specifically implement for external.
-  }
-
-  @Override
-  public void removeAllSPSPathIds() {
-    // We need not specifically implement for external.
-  }
-
   @Override
   public void scanAndCollectFiles(long path)
       throws IOException, InterruptedException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalStoragePolicySatisfier.java
index 8e19a7ca6e..15cdc6eb47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalStoragePolicySatisfier.java
@@ -70,7 +70,7 @@ public static void main(String[] args) throws Exception {
       ExternalSPSContext context = new ExternalSPSContext(sps, nnc);
 
       sps.init(context);
-      sps.start(true, StoragePolicySatisfierMode.EXTERNAL);
+      sps.start(StoragePolicySatisfierMode.EXTERNAL);
       if (sps != null) {
         sps.join();
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index 37897793a1..5fd66125e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -154,7 +154,7 @@ Note that, when both -p and -f options are omitted, the default path is the root
 
 ####Administrator notes:
 
-`StoragePolicySatisfier` and `Mover tool` cannot run simultaneously. If a Mover instance is already triggered and running, SPS will be disabled while starting. In that case, administrator should make sure, Mover execution finished and then enable(internal service inside NN or external service outside NN) SPS again. Similarly when SPS enabled already, Mover cannot be run. If administrator is looking to run Mover tool explicitly, then he/she should make sure to disable SPS first and then run Mover. Please look at the commands section to know how to enable(internal service inside NN or external service outside NN) or disable SPS dynamically.
+`StoragePolicySatisfier` and `Mover tool` cannot run simultaneously. If a Mover instance is already triggered and running, SPS will be disabled while starting. In that case, the administrator should make sure the Mover execution has finished and then enable the external SPS service again. Similarly, when SPS is already enabled, Mover cannot be run. If the administrator wants to run the Mover tool explicitly, they should disable SPS first and then run Mover. Please look at the commands section to know how to enable the external service outside NN or disable SPS dynamically.
 
 Storage Policy Commands
 -----------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
index 2ad8640ab5..1ac9257a57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
@@ -113,6 +113,8 @@ private void clusterSetUp(boolean isHAEnabled, Configuration newConf)
     // Reduced refresh cycle to update latest datanodes.
     conf.setLong(DFSConfigKeys.DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS,
         1000);
+    conf.setInt(
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MAX_RETRY_ATTEMPTS_KEY, 20);
     final int dnNumber = storageTypes.length;
     final short replication = 3;
     MiniDFSCluster.Builder clusterBuilder = new MiniDFSCluster.Builder(conf)
@@ -136,7 +138,7 @@ private void clusterSetUp(boolean isHAEnabled, Configuration newConf)
 
       ctxt = new ExternalSPSContext(sps, nnc);
       sps.init(ctxt);
-      sps.start(true, StoragePolicySatisfierMode.EXTERNAL);
+      sps.start(StoragePolicySatisfierMode.EXTERNAL);
       createTestFiles(fs, replication);
     }
 
@@ -188,6 +190,7 @@ private void clusterShutdown() throws IOException{
    */
   @Test(timeout = 300000)
   public void testWithCheckpoint() throws Exception {
+    SecondaryNameNode secondary = null;
     try {
       clusterSetUp();
       fs.setStoragePolicy(testFile, WARM);
@@ -196,7 +199,7 @@ public void testWithCheckpoint() throws Exception {
       // Start the checkpoint.
       conf.set(
           DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
-      SecondaryNameNode secondary = new SecondaryNameNode(conf);
+      secondary = new SecondaryNameNode(conf);
       secondary.doCheckpoint();
 
       restartCluster();
@@ -214,6 +217,9 @@ public void testWithCheckpoint() throws Exception {
           childFileName, StorageType.ARCHIVE, 3, timeout, fs);
 
     } finally {
+      if (secondary != null) {
+        secondary.shutdown();
+      }
       clusterShutdown();
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
index 250e54b581..018a5dc69d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
@@ -510,7 +510,7 @@ private void startSPS() throws IOException {
     sps = new StoragePolicySatisfier(conf);
     ctxt = new ExternalSPSContext(sps, nnc);
     sps.init(ctxt);
-    sps.start(true, StoragePolicySatisfierMode.EXTERNAL);
+    sps.start(StoragePolicySatisfierMode.EXTERNAL);
   }
 
   private static void initConfWithStripe(Configuration conf,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
index d9a93fd095..53878e0716 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
@@ -219,7 +219,7 @@ private MiniDFSCluster startCluster(final Configuration conf,
     externalCtxt = new ExternalSPSContext(externalSps, nnc);
 
     externalSps.init(externalCtxt);
-    externalSps.start(true, StoragePolicySatisfierMode.EXTERNAL);
+    externalSps.start(StoragePolicySatisfierMode.EXTERNAL);
     return cluster;
   }
 
@@ -234,7 +234,7 @@ private void restartNamenode() throws IOException{
     externalCtxt = new ExternalSPSContext(externalSps, nnc);
 
     externalSps.init(externalCtxt);
-    externalSps.start(true, StoragePolicySatisfierMode.EXTERNAL);
+    externalSps.start(StoragePolicySatisfierMode.EXTERNAL);
   }
 
   private void initSecureConf(Configuration conf) throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java
index 1ab77888cc..61fccfac54 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java
@@ -71,7 +71,7 @@ public void clusterSetUp() throws IOException, URISyntaxException {
     Context externalCtxt = new ExternalSPSContext(externalSps, nnc);
 
     externalSps.init(externalCtxt);
-    externalSps.start(true, StoragePolicySatisfierMode.EXTERNAL);
+    externalSps.start(StoragePolicySatisfierMode.EXTERNAL);
   }
 
   @After
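
Reviewer note, not part of the patch: with the reconfigStart flag removed from
SPSService#start, every call site above collapses to the same init/start
sequence. Below is a minimal sketch of the external-service startup as it looks
after this patch, modeled on ExternalStoragePolicySatisfier#main; the
configuration and connector wiring (HdfsConfiguration, getNameNodeConnector)
is abbreviated from that class, and the import paths are my assumption of
trunk at the time, not something this patch touches:

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
    import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
    import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
    import org.apache.hadoop.hdfs.server.sps.ExternalSPSContext;

    HdfsConfiguration spsConf = new HdfsConfiguration();
    // Connect to the active NameNode; getNameNodeConnector() here stands in
    // for the helper in ExternalStoragePolicySatisfier.
    NameNodeConnector nnc = getNameNodeConnector(spsConf);

    StoragePolicySatisfier sps = new StoragePolicySatisfier(spsConf);
    ExternalSPSContext context = new ExternalSPSContext(sps, nnc);
    sps.init(context);
    // The mode alone now selects the behavior; no reconfigStart flag.
    sps.start(StoragePolicySatisfierMode.EXTERNAL);
    sps.join();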