HDFS-10885. [SPS]: Mover tool should not be allowed to run when Storage Policy Satisfier is on. Contributed by Wei Zhou

Authored by Rakesh Radhakrishnan, 2016-12-06 17:56:08 +05:30; committed by Uma Maheswara Rao Gangumalla
parent b07291e176
commit cd5262aba0
18 changed files with 258 additions and 14 deletions

DFSClient.java

@@ -3109,6 +3109,10 @@ public void satisfyStoragePolicy(String src) throws IOException {
}
}
public boolean isStoragePolicySatisfierRunning() throws IOException {
return namenode.isStoragePolicySatisfierRunning();
}
Tracer getTracer() {
return tracer;
}

ClientProtocol.java

@@ -1756,4 +1756,12 @@ BatchedEntries<OpenFileEntry> listOpenFiles(long prevId,
*/
@Idempotent
void satisfyStoragePolicy(String path) throws IOException;
/**
* Check if StoragePolicySatisfier is running.
* @return true if StoragePolicySatisfier is running
* @throws IOException
*/
@Idempotent
boolean isStoragePolicySatisfierRunning() throws IOException;
}
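The new `ClientProtocol` method is marked `@Idempotent`, so it is safe to poll and safe across RPC retries. A minimal client-side sketch (not part of this commit; the `SpsProbe` class name and the `hdfs://localhost:9000` URI are illustrative assumptions):

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Hypothetical probe, for illustration only.
public class SpsProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Assumed NameNode endpoint; substitute your cluster's URI.
    try (FileSystem fs =
        FileSystem.get(URI.create("hdfs://localhost:9000"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // Goes through DFSClient to the new RPC; a standby NameNode
      // rejects the call with StandbyException (see NameNodeRpcServer below).
      boolean running = dfs.getClient().isStoragePolicySatisfierRunning();
      System.out.println("StoragePolicySatisfier running: " + running);
    }
  }
}
```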

ClientNamenodeProtocolTranslatorPB.java

@@ -147,6 +147,8 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsStoragePolicySatisfierRunningRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsStoragePolicySatisfierRunningResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto;
@@ -295,6 +297,10 @@ public class ClientNamenodeProtocolTranslatorPB implements
private final static GetErasureCodingCodecsRequestProto
VOID_GET_EC_CODEC_REQUEST = GetErasureCodingCodecsRequestProto
.newBuilder().build();
private final static IsStoragePolicySatisfierRunningRequestProto
VOID_IS_SPS_RUNNING_REQUEST = IsStoragePolicySatisfierRunningRequestProto
.newBuilder().build();
public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) {
rpcProxy = proxy;
@@ -1901,6 +1907,18 @@ public ErasureCodingPolicy getErasureCodingPolicy(String src)
}
}
@Override
public boolean isStoragePolicySatisfierRunning() throws IOException {
try {
IsStoragePolicySatisfierRunningResponseProto rep =
rpcProxy.isStoragePolicySatisfierRunning(null,
VOID_IS_SPS_RUNNING_REQUEST);
return rep.getRunning();
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public QuotaUsage getQuotaUsage(String path) throws IOException {
GetQuotaUsageRequestProto req =

ClientNamenodeProtocol.proto

@@ -839,6 +839,13 @@ message SatisfyStoragePolicyResponseProto {
}
message IsStoragePolicySatisfierRunningRequestProto { // no parameters
}
message IsStoragePolicySatisfierRunningResponseProto {
required bool running = 1;
}
service ClientNamenodeProtocol {
rpc getBlockLocations(GetBlockLocationsRequestProto)
returns(GetBlockLocationsResponseProto);
@@ -1027,4 +1034,6 @@ service ClientNamenodeProtocol {
returns(ListOpenFilesResponseProto);
rpc satisfyStoragePolicy(SatisfyStoragePolicyRequestProto)
returns(SatisfyStoragePolicyResponseProto);
rpc isStoragePolicySatisfierRunning(IsStoragePolicySatisfierRunningRequestProto)
returns(IsStoragePolicySatisfierRunningResponseProto);
}

DFSConfigKeys.java

@@ -613,6 +613,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_MOVER_MAX_NO_MOVE_INTERVAL_KEY = "dfs.mover.max-no-move-interval";
public static final int DFS_MOVER_MAX_NO_MOVE_INTERVAL_DEFAULT = 60*1000; // One minute
public static final String DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY =
"dfs.storage.policy.satisfier.activate";
public static final boolean DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT =
true;
public static final String DFS_DATANODE_ADDRESS_KEY = "dfs.datanode.address";
public static final int DFS_DATANODE_DEFAULT_PORT = 9866;
public static final String DFS_DATANODE_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_DEFAULT_PORT;
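As the BlockManager change further down shows, this key is only half of the gate: the satisfier is constructed only when `dfs.storage.policy.enabled` is also true. A sketch of building a configuration with SPS on (the helper class is illustrative, not part of this commit):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Illustrative helper; not part of this commit.
public class SpsConfExample {
  static Configuration withSpsEnabled() {
    Configuration conf = new HdfsConfiguration();
    // Both flags must be true, or BlockManager leaves sps == null
    // (see the BlockManager constructor change below).
    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, true);
    conf.setBoolean(
        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, true);
    return conf;
  }
}
```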

ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -159,6 +159,8 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsStoragePolicySatisfierRunningRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsStoragePolicySatisfierRunningResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto;
@@ -1859,6 +1861,22 @@ public GetErasureCodingPolicyResponseProto getErasureCodingPolicy(RpcController
}
}
@Override
public IsStoragePolicySatisfierRunningResponseProto
isStoragePolicySatisfierRunning(RpcController controller,
IsStoragePolicySatisfierRunningRequestProto req)
throws ServiceException {
try {
boolean ret = server.isStoragePolicySatisfierRunning();
IsStoragePolicySatisfierRunningResponseProto.Builder builder =
IsStoragePolicySatisfierRunningResponseProto.newBuilder();
builder.setRunning(ret);
return builder.build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public GetQuotaUsageResponseProto getQuotaUsage(
RpcController controller, GetQuotaUsageRequestProto req)

ExitStatus.java

@@ -30,7 +30,8 @@ public enum ExitStatus {
IO_EXCEPTION(-4),
ILLEGAL_ARGUMENTS(-5),
INTERRUPTED(-6),
UNFINALIZED_UPGRADE(-7),
SKIPPED_DUE_TO_SPS(-8);
private final int code;

BlockManager.java

@@ -471,7 +471,24 @@ public BlockManager(final Namesystem namesystem, boolean haEnabled,
DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT)
* 1000L);
final boolean storagePolicyEnabled =
conf.getBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY,
DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_DEFAULT);
final boolean spsEnabled =
conf.getBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT);
if (storagePolicyEnabled && spsEnabled) {
sps = new StoragePolicySatisfier(namesystem,
storageMovementNeeded, this);
} else {
sps = null;
LOG.warn(
"Failed to start StoragePolicySatisfier"
+ " since {} set to {} and {} set to {}.",
DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, storagePolicyEnabled,
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, spsEnabled);
}
blockTokenSecretManager = createBlockTokenSecretManager(conf);
providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);
@@ -696,11 +713,15 @@ public void activate(Configuration conf, long blockTotal) {
this.blockReportThread.start();
mxBeanName = MBeans.register("NameNode", "BlockStats", this);
bmSafeMode.activate(blockTotal);
if (sps != null) {
sps.start();
}
}
public void close() {
if (sps != null) {
sps.stop();
}
bmSafeMode.close();
try {
redundancyThread.interrupt();

HdfsServerConstants.java

@@ -24,6 +24,7 @@
import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
@@ -364,6 +365,8 @@ enum BlockUCState {
String XATTR_ERASURECODING_POLICY =
"system.hdfs.erasurecoding.policy";
Path MOVER_ID_PATH = new Path("/system/mover.id");
long BLOCK_GROUP_INDEX_MASK = 15;
byte MAX_BLOCKS_IN_GROUP = 16;
}

Mover.java

@@ -41,11 +41,14 @@
import org.apache.hadoop.hdfs.server.balancer.Matcher;
import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.security.SecurityUtil;
@@ -70,8 +73,6 @@
public class Mover {
static final Log LOG = LogFactory.getLog(Mover.class);
static final Path MOVER_ID_PATH = new Path("/system/mover.id");
private static class StorageMap {
private final StorageGroupMap<Source> sources
= new StorageGroupMap<Source>();
@@ -645,7 +646,7 @@ static int run(Map<URI, List<Path>> namenodes, Configuration conf)
List<NameNodeConnector> connectors = Collections.emptyList();
try {
connectors = NameNodeConnector.newNameNodeConnectors(namenodes,
Mover.class.getSimpleName(), HdfsServerConstants.MOVER_ID_PATH, conf,
NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
while (connectors.size() > 0) {
@@ -655,6 +656,22 @@ static int run(Map<URI, List<Path>> namenodes, Configuration conf)
NameNodeConnector nnc = iter.next();
final Mover m = new Mover(nnc, conf, retryCount,
excludedPinnedBlocks);
boolean spsRunning;
try {
spsRunning = nnc.getDistributedFileSystem().getClient()
.isStoragePolicySatisfierRunning();
} catch (StandbyException e) {
System.err.println("Skip Standby Namenode. " + nnc.toString());
continue;
}
if (spsRunning) {
System.err.println("Mover failed due to StoragePolicySatisfier"
+ " is running. Exiting with status "
+ ExitStatus.SKIPPED_DUE_TO_SPS + "... ");
return ExitStatus.SKIPPED_DUE_TO_SPS.getExitCode();
}
final ExitStatus r = m.run();
if (r == ExitStatus.SUCCESS) {
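With this guard, `hdfs mover` exits immediately with the new code whenever SPS is active. A sketch of detecting that outcome programmatically, mirroring `testMoveWhenStoragePolicySatisfierIsRunning` further down (the class is illustrative and sits in `org.apache.hadoop.hdfs.server.mover` because `Mover.Cli` is package-private; the `/data` path is an assumption):

```java
package org.apache.hadoop.hdfs.server.mover;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.balancer.ExitStatus;
import org.apache.hadoop.util.ToolRunner;

// Illustrative guard; not part of this commit.
public class MoverGuardExample {
  static int runMoverOnce(Configuration conf) throws Exception {
    int rc = ToolRunner.run(conf, new Mover.Cli(),
        new String[] {"-p", "/data"});
    if (rc == ExitStatus.SKIPPED_DUE_TO_SPS.getExitCode()) {
      // SPS is active on the NameNode: deactivate
      // dfs.storage.policy.satisfier.activate and restart the NameNode
      // before re-running the Mover.
      System.err.println("Mover skipped because SPS is running.");
    }
    return rc;
  }
}
```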

FSNamesystem.java

@@ -3897,8 +3897,10 @@ nodeReg, reports, getBlockPoolId(), cacheCapacity, cacheUsed,
// TODO: Handle blocks movement results sent by the coordinator datanode.
// This has to be revisited as part of HDFS-11029.
if (blockManager.getStoragePolicySatisfier() != null) {
blockManager.getStoragePolicySatisfier()
.handleBlocksStorageMovementResults(blksMovementResults);
}
//create ha status
final NNHAStatusHeartbeat haState = new NNHAStatusHeartbeat(

NameNodeRpcServer.java

@@ -2521,4 +2521,15 @@ public List<String> listReconfigurableProperties() throws IOException {
namesystem.logAuditEvent(true, operationName, null);
return result;
}
@Override
public boolean isStoragePolicySatisfierRunning() throws IOException {
checkNNStartup();
if (nn.isStandbyState()) {
throw new StandbyException("Not supported by Standby Namenode.");
}
StoragePolicySatisfier sps = namesystem.getBlockManager()
.getStoragePolicySatisfier();
return sps != null && sps.isRunning();
}
}

StoragePolicySatisfier.java

@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -38,6 +39,7 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
@@ -70,6 +72,7 @@ public class StoragePolicySatisfier implements Runnable {
private final BlockManager blockManager;
private final BlockStorageMovementNeeded storageMovementNeeded;
private final BlockStorageMovementAttemptedItems storageMovementsMonitor;
private volatile boolean isRunning = false;
public StoragePolicySatisfier(final Namesystem namesystem,
final BlockStorageMovementNeeded storageMovementNeeded,
@@ -99,6 +102,7 @@ public void start() {
* Stop storage policy satisfier daemon thread.
*/
public void stop() {
isRunning = false;
if (storagePolicySatisfierThread == null) {
return;
}
@@ -110,8 +114,40 @@ public void stop() {
this.storageMovementsMonitor.stop();
}
/**
* Check whether StoragePolicySatisfier is running.
* @return true if running
*/
public boolean isRunning() {
return isRunning;
}
// Return true if a Mover instance is running
private boolean checkIfMoverRunning() {
boolean ret = false;
try {
String moverId = HdfsServerConstants.MOVER_ID_PATH.toString();
INode inode = namesystem.getFSDirectory().getINode(
moverId, FSDirectory.DirOp.READ);
if (inode != null) {
ret = true;
}
} catch (IOException e) {
LOG.info("StoragePolicySatisfier is enabled as no Mover ID file found.");
ret = false;
}
return ret;
}
@Override
public void run() {
isRunning = !checkIfMoverRunning();
if (!isRunning) {
LOG.error("StoragePolicySatisfier thread stopped "
+ "as Mover ID file " + HdfsServerConstants.MOVER_ID_PATH.toString()
+ " exists");
return;
}
while (namesystem.isRunning()) {
try {
Long blockCollectionID = storageMovementNeeded.get();
@@ -123,6 +159,7 @@ public void run() {
// we want to check block movements.
Thread.sleep(3000);
} catch (Throwable t) {
isRunning = false;
if (!namesystem.isRunning()) {
LOG.info("Stopping StoragePolicySatisfier.");
if (!(t instanceof InterruptedException)) {
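Note the symmetry of the two guards introduced by this commit: the Mover queries the active NameNode over the new `isStoragePolicySatisfierRunning` RPC before starting, while SPS checks for the Mover ID file `/system/mover.id` (which a live Mover, via `NameNodeConnector`, creates and holds open) before its thread begins work. Together they ensure at most one of the two block-movement mechanisms runs at a time; the final test in this commit exercises the SPS-side guard by creating the ID file directly.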

hdfs-default.xml

@@ -4495,6 +4495,15 @@
</description>
</property>
<property>
<name>dfs.storage.policy.satisfier.activate</name>
<value>true</value>
<description>
If true, activate StoragePolicySatisfier.
By default, StoragePolicySatisfier is activated.
</description>
</property>
<property> <property>
<name>dfs.pipeline.ecn</name> <name>dfs.pipeline.ecn</name>
<value>false</value> <value>false</value>

TestBalancer.java

@@ -67,6 +67,8 @@ private static void initConf(Configuration conf) {
conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
1L);
conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
true);
}
@Before

TestMover.java

@@ -78,6 +78,7 @@
import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.mover.Mover.MLocation;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.http.HttpConfig;
@@ -113,6 +114,8 @@ static void initConf(Configuration conf) {
conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
1L);
conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
conf.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
}
static Mover newMover(Configuration conf) throws IOException {
@@ -124,7 +127,7 @@ static Mover newMover(Configuration conf) throws IOException {
}
final List<NameNodeConnector> nncs = NameNodeConnector.newNameNodeConnectors(
nnMap, Mover.class.getSimpleName(), HdfsServerConstants.MOVER_ID_PATH, conf,
NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
return new Mover(nncs.get(0), conf, new AtomicInteger(0), new HashMap<>());
}
@@ -132,6 +135,8 @@ static Mover newMover(Configuration conf) throws IOException {
@Test
public void testScheduleSameBlock() throws IOException {
final Configuration conf = new HdfsConfiguration();
conf.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(4).build();
try {
@@ -454,8 +459,11 @@ private void checkMovePaths(List<Path> actual, Path... expected) {
*/
@Test
public void testMoverCli() throws Exception {
final Configuration clusterConf = new HdfsConfiguration();
clusterConf.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
final MiniDFSCluster cluster = new MiniDFSCluster
.Builder(clusterConf).numDataNodes(0).build();
try {
final Configuration conf = cluster.getConfiguration(0);
try {
@@ -487,8 +495,10 @@ public void testMoverCli() throws Exception {
@Test
public void testMoverCliWithHAConf() throws Exception {
final Configuration conf = new HdfsConfiguration();
conf.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
final MiniDFSCluster cluster = new MiniDFSCluster
.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.numDataNodes(0).build();
HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
@@ -509,11 +519,16 @@ public void testMoverCliWithHAConf() throws Exception {
@Test
public void testMoverCliWithFederation() throws Exception {
final Configuration clusterConf = new HdfsConfiguration();
clusterConf.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
final MiniDFSCluster cluster = new MiniDFSCluster
.Builder(clusterConf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(3))
.numDataNodes(0).build();
final Configuration conf = new HdfsConfiguration();
conf.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
DFSTestUtil.setFederatedConfiguration(cluster, conf);
try {
Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
@@ -557,11 +572,16 @@ public void testMoverCliWithFederation() throws Exception {
@Test
public void testMoverCliWithFederationHA() throws Exception {
final Configuration clusterConf = new HdfsConfiguration();
clusterConf.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
final MiniDFSCluster cluster = new MiniDFSCluster
.Builder(clusterConf)
.nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(3))
.numDataNodes(0).build();
final Configuration conf = new HdfsConfiguration();
conf.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
DFSTestUtil.setFederatedHAConfiguration(cluster, conf);
try {
Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
@@ -625,6 +645,8 @@ public void testTwoReplicaSameStorageTypeShouldNotSelect() throws Exception {
public void testMoveWhenStoragePolicyNotSatisfying() throws Exception {
// HDFS-8147
final Configuration conf = new HdfsConfiguration();
conf.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(3)
.storageTypes(
@@ -650,6 +672,36 @@ public void testMoveWhenStoragePolicyNotSatisfying() throws Exception {
}
}
@Test(timeout = 300000)
public void testMoveWhenStoragePolicySatisfierIsRunning() throws Exception {
final Configuration conf = new HdfsConfiguration();
conf.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, true);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(3)
.storageTypes(
new StorageType[][] {{StorageType.DISK}, {StorageType.DISK},
{StorageType.DISK}}).build();
try {
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final String file = "/testMoveWhenStoragePolicySatisfierIsRunning";
// write to DISK
final FSDataOutputStream out = dfs.create(new Path(file));
out.writeChars("testMoveWhenStoragePolicySatisfierIsRunning");
out.close();
// move to ARCHIVE
dfs.setStoragePolicy(new Path(file), "COLD");
int rc = ToolRunner.run(conf, new Mover.Cli(),
new String[] {"-p", file.toString()});
int exitcode = ExitStatus.SKIPPED_DUE_TO_SPS.getExitCode();
Assert.assertEquals("Exit code should be " + exitcode, exitcode, rc);
} finally {
cluster.shutdown();
}
}
@Test
public void testMoverFailedRetry() throws Exception {
// HDFS-8147
@@ -746,6 +798,8 @@ void initConfWithStripe(Configuration conf) {
1L);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
false);
conf.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
}
@Test(timeout = 300000)

TestStorageMover.java

@@ -96,6 +96,8 @@ public class TestStorageMover {
DEFAULT_CONF.setLong(
DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 2L);
DEFAULT_CONF.setLong(DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY, 2000L);
DEFAULT_CONF.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
DEFAULT_POLICIES = BlockStoragePolicySuite.createDefaultSuite();
HOT = DEFAULT_POLICIES.getPolicy(HdfsConstants.HOT_STORAGE_POLICY_NAME);

TestStoragePolicySatisfier.java

@@ -31,12 +31,14 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Before;
@@ -442,6 +444,27 @@ public void testWhenNoTargetDatanodeToSatisfyStoragePolicy()
}
}
/**
* Tests to verify that SPS should not start when a Mover instance
* is running.
*/
@Test(timeout = 300000)
public void testWhenMoverIsAlreadyRunningBeforeStoragePolicySatisfier()
throws IOException {
try {
// Simulate Mover by creating MOVER_ID file
DFSTestUtil.createFile(hdfsCluster.getFileSystem(),
HdfsServerConstants.MOVER_ID_PATH, 0, (short) 1, 0);
hdfsCluster.restartNameNode(true);
boolean running = hdfsCluster.getFileSystem()
.getClient().isStoragePolicySatisfierRunning();
Assert.assertFalse("SPS should not start "
+ "when a Mover instance is running", running);
} finally {
hdfsCluster.shutdown();
}
}
private void waitForAttemptedItems(long expectedBlkMovAttemptedCount,
int timeout) throws TimeoutException, InterruptedException {
BlockManager blockManager = hdfsCluster.getNamesystem().getBlockManager();