HDFS-12214: [SPS]: Fix review comments of StoragePolicySatisfier feature. Contributed by Rakesh R.

This commit is contained in:
Uma Maheswara Rao G 2017-08-17 13:21:07 -07:00 committed by Uma Maheswara Rao Gangumalla
parent 3b601f2c0e
commit 0e820f16af
22 changed files with 266 additions and 162 deletions

View File

@ -62,7 +62,7 @@ function hadoop_usage
hadoop_add_subcommand "portmap" daemon "run a portmap service"
hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary namenode"
hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a directory or diff the current directory contents with a snapshot"
hadoop_add_subcommand "storagepolicies" admin "list/get/set block storage policies"
hadoop_add_subcommand "storagepolicies" admin "list/get/set/satisfyStoragePolicy block storage policies"
hadoop_add_subcommand "version" client "print the version"
hadoop_add_subcommand "zkfc" daemon "run the ZK Failover Controller daemon"
hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false

View File

@ -614,10 +614,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final int DFS_MOVER_MAX_NO_MOVE_INTERVAL_DEFAULT = 60*1000; // One minute
// SPS related configurations
public static final String DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY =
"dfs.storage.policy.satisfier.activate";
public static final boolean DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT =
true;
public static final String DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY =
"dfs.storage.policy.satisfier.enabled";
public static final boolean DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT =
false;
public static final String DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY =
"dfs.storage.policy.satisfier.recheck.timeout.millis";
public static final int DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_DEFAULT =

View File

@ -429,6 +429,8 @@ public long getTotalECBlockGroups() {
/** For satisfying block storage policies. */
private final StoragePolicySatisfier sps;
private final boolean storagePolicyEnabled;
private boolean spsEnabled;
private final BlockStorageMovementNeeded storageMovementNeeded =
new BlockStorageMovementNeeded();
@ -440,15 +442,9 @@ public long getTotalECBlockGroups() {
/** Storages accessible from multiple DNs. */
private final ProvidedStorageMap providedStorageMap;
/**
* Whether HA is enabled.
*/
private final boolean haEnabled;
public BlockManager(final Namesystem namesystem, boolean haEnabled,
final Configuration conf) throws IOException {
this.namesystem = namesystem;
this.haEnabled = haEnabled;
datanodeManager = new DatanodeManager(this, namesystem, conf);
heartbeatManager = datanodeManager.getHeartbeatManager();
this.blockIdManager = new BlockIdManager(this);
@ -477,24 +473,15 @@ public BlockManager(final Namesystem namesystem, boolean haEnabled,
DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT)
* 1000L);
final boolean storagePolicyEnabled =
storagePolicyEnabled =
conf.getBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY,
DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_DEFAULT);
final boolean spsEnabled =
spsEnabled =
conf.getBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT);
if (storagePolicyEnabled && spsEnabled) {
sps = new StoragePolicySatisfier(namesystem, storageMovementNeeded, this,
conf);
} else {
sps = null;
LOG.warn(
"Failed to start StoragePolicySatisfier"
+ " since {} set to {} and {} set to {}.",
DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, storagePolicyEnabled,
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, spsEnabled);
}
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT);
sps = new StoragePolicySatisfier(namesystem, storageMovementNeeded, this,
conf);
blockTokenSecretManager = createBlockTokenSecretManager(conf);
providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);
@ -719,15 +706,10 @@ public void activate(Configuration conf, long blockTotal) {
this.blockReportThread.start();
mxBeanName = MBeans.register("NameNode", "BlockStats", this);
bmSafeMode.activate(blockTotal);
if (sps != null && !haEnabled) {
sps.start(false);
}
}
public void close() {
if (sps != null) {
sps.deactivate(false);
}
stopSPS(false);
bmSafeMode.close();
try {
redundancyThread.interrupt();
@ -5058,46 +5040,86 @@ public StoragePolicySatisfier getStoragePolicySatisfier() {
}
/**
* Activate the storage policy satisfier by starting its service.
* Start storage policy satisfier service.
*/
public void activateSPS() {
if (sps == null) {
LOG.info("Storage policy satisfier is not initialized.");
public void startSPS() {
if (!(storagePolicyEnabled && spsEnabled)) {
LOG.info(
"Failed to start StoragePolicySatisfier "
+ " as {} set to {} and {} set to {}.",
DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, storagePolicyEnabled,
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, spsEnabled);
return;
} else if (sps.isRunning()) {
LOG.info("Storage policy satisfier is already running.");
return;
}
sps.start(false);
}
/**
* Stop storage policy satisfier service.
*
* @param forceStop
* true represents that it should stop SPS service by clearing all
* pending SPS work
*/
public void stopSPS(boolean forceStop) {
if (!(storagePolicyEnabled && spsEnabled)) {
LOG.info("Storage policy satisfier is not enabled.");
return;
} else if (!sps.isRunning()) {
LOG.info("Storage policy satisfier is already stopped.");
return;
}
sps.disable(forceStop);
}
/**
* Enable storage policy satisfier by starting its service.
*/
public void enableSPS() {
if (!storagePolicyEnabled){
LOG.info("Failed to start StoragePolicySatisfier as {} set to {}.",
DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, storagePolicyEnabled);
return;
}
spsEnabled = true;
if (sps.isRunning()) {
LOG.info("Storage policy satisfier is already running.");
return;
}
sps.start(true);
}
/**
* Deactivate the storage policy satisfier by stopping its services.
* Disable the storage policy satisfier by stopping its services.
*/
public void deactivateSPS() {
if (sps == null) {
LOG.info("Storage policy satisfier is not initialized.");
return;
} else if (!sps.isRunning()) {
public void disableSPS() {
spsEnabled = false;
if (!sps.isRunning()) {
LOG.info("Storage policy satisfier is already stopped.");
return;
}
sps.deactivate(true);
LOG.info("Stopping StoragePolicySatisfier, as admin requested to "
+ "stop it.");
sps.disable(true);
}
/**
* Timed wait to stop storage policy satisfier daemon threads.
*/
public void stopSPSGracefully() {
if (sps != null) {
sps.stopGracefully();
}
sps.stopGracefully();
}
/**
* @return True if storage policy satisfier running.
*/
public boolean isStoragePolicySatisfierRunning() {
return sps == null ? false : sps.isRunning();
return sps.isRunning();
}
}

View File

@ -136,7 +136,7 @@ public synchronized void start() {
* Sets running flag to false. Also, this will interrupt monitor thread and
* clear all the queued up tasks.
*/
public synchronized void deactivate() {
public synchronized void stop() {
monitorRunning = false;
if (timerThread != null) {
timerThread.interrupt();
@ -152,7 +152,7 @@ synchronized void stopGracefully() {
return;
}
if (monitorRunning) {
deactivate();
stop();
}
try {
timerThread.join(3000);

View File

@ -1291,7 +1291,7 @@ void startActiveServices() throws IOException {
edekCacheLoaderDelay, edekCacheLoaderInterval);
}
blockManager.activateSPS();
blockManager.startSPS();
} finally {
startingActiveService = false;
blockManager.checkSafeMode();
@ -1322,7 +1322,7 @@ void stopActiveServices() {
writeLock();
try {
if (blockManager != null) {
blockManager.deactivateSPS();
blockManager.stopSPS(true);
}
stopSecretManager();
leaseManager.stopMonitor();
@ -2241,6 +2241,8 @@ void setStoragePolicy(String src, String policyName) throws IOException {
*/
void satisfyStoragePolicy(String src, boolean logRetryCache)
throws IOException {
final String operationName = "satisfyStoragePolicy";
FileStatus auditStat;
checkOperation(OperationCategory.WRITE);
writeLock();
try {
@ -2258,16 +2260,20 @@ void satisfyStoragePolicy(String src, boolean logRetryCache)
|| !blockManager.getStoragePolicySatisfier().isRunning()) {
throw new UnsupportedActionException(
"Cannot request to satisfy storage policy "
+ "when storage policy satisfier feature has been deactivated"
+ " by admin. Seek for an admin help to activate it "
+ "when storage policy satisfier feature has been disabled"
+ " by admin. Seek for an admin help to enable it "
+ "or use Mover tool.");
}
FSDirSatisfyStoragePolicyOp.satisfyStoragePolicy(dir, blockManager, src,
logRetryCache);
auditStat = FSDirSatisfyStoragePolicyOp.satisfyStoragePolicy(
dir, blockManager, src, logRetryCache);
} catch (AccessControlException e) {
logAuditEvent(false, operationName, src);
throw e;
} finally {
writeUnlock();
writeUnlock(operationName);
}
getEditLog().logSync();
logAuditEvent(true, operationName, src, null, auditStat);
}
/**

View File

@ -160,7 +160,7 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.FS_PROTECTED_DIRECTORIES;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY;
import static org.apache.hadoop.util.ExitUtil.terminate;
import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_BACKOFF_ENABLE;
@ -295,7 +295,7 @@ public enum OperationCategory {
DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
FS_PROTECTED_DIRECTORIES,
HADOOP_CALLER_CONTEXT_ENABLED_KEY,
DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY));
DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY));
private static final String USAGE = "Usage: hdfs namenode ["
+ StartupOption.BACKUP.getName() + "] | \n\t["
@ -2041,8 +2041,8 @@ protected String reconfigurePropertyImpl(String property, String newVal)
return reconfCallerContextEnabled(newVal);
} else if (property.equals(ipcClientRPCBackoffEnable)) {
return reconfigureIPCBackoffEnabled(newVal);
} else if (property.equals(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY)) {
return reconfigureSPSActivate(newVal, property);
} else if (property.equals(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY)) {
return reconfigureSPSEnabled(newVal, property);
} else {
throw new ReconfigurationException(property, newVal, getConf().get(
property));
@ -2126,29 +2126,29 @@ String reconfigureIPCBackoffEnabled(String newVal) {
return Boolean.toString(clientBackoffEnabled);
}
String reconfigureSPSActivate(String newVal, String property)
String reconfigureSPSEnabled(String newVal, String property)
throws ReconfigurationException {
if (newVal == null || !(newVal.equalsIgnoreCase(Boolean.TRUE.toString())
|| newVal.equalsIgnoreCase(Boolean.FALSE.toString()))) {
throw new ReconfigurationException(property, newVal,
getConf().get(property),
new HadoopIllegalArgumentException(
"For activating or deactivating storage policy satisfier, "
"For enabling or disabling storage policy satisfier, "
+ "we must pass true/false only"));
}
if (!isActiveState()) {
throw new ReconfigurationException(property, newVal,
getConf().get(property), new HadoopIllegalArgumentException(
"Activating or deactivating storage policy satisfier service on "
"Enabling or disabling storage policy satisfier service on "
+ state + " NameNode is not allowed"));
}
boolean activateSPS = Boolean.parseBoolean(newVal);
if (activateSPS) {
namesystem.getBlockManager().activateSPS();
boolean enableSPS = Boolean.parseBoolean(newVal);
if (enableSPS) {
namesystem.getBlockManager().enableSPS();
} else {
namesystem.getBlockManager().deactivateSPS();
namesystem.getBlockManager().disableSPS();
}
return newVal;
}

View File

@ -138,7 +138,7 @@ public synchronized void start(boolean reconfigStart) {
}
if (reconfigStart) {
LOG.info("Starting StoragePolicySatisfier, as admin requested to "
+ "activate it.");
+ "start it.");
} else {
LOG.info("Starting StoragePolicySatisfier.");
}
@ -154,23 +154,21 @@ public synchronized void start(boolean reconfigStart) {
}
/**
* Deactivates storage policy satisfier by stopping its services.
* Disables storage policy satisfier by stopping its services.
*
* @param reconfig
* true represents deactivating SPS service as requested by admin,
* false otherwise
* @param forceStop
* true represents that it should stop SPS service by clearing all
* pending SPS work
*/
public synchronized void deactivate(boolean reconfig) {
public synchronized void disable(boolean forceStop) {
isRunning = false;
if (storagePolicySatisfierThread == null) {
return;
}
storagePolicySatisfierThread.interrupt();
this.storageMovementsMonitor.deactivate();
if (reconfig) {
LOG.info("Stopping StoragePolicySatisfier, as admin requested to "
+ "deactivate it.");
this.storageMovementsMonitor.stop();
if (forceStop) {
this.clearQueuesWithNotification();
addDropSPSWorkCommandsToAllDNs();
} else {
@ -183,7 +181,7 @@ public synchronized void deactivate(boolean reconfig) {
*/
public synchronized void stopGracefully() {
if (isRunning) {
deactivate(true);
disable(true);
}
this.storageMovementsMonitor.stopGracefully();
if (storagePolicySatisfierThread == null) {

View File

@ -40,7 +40,7 @@ public class BlocksStorageMovementResult {
* IN_PROGRESS - If all or some of the blocks associated to track id are
* still moving.
*/
public static enum Status {
public enum Status {
SUCCESS, FAILURE, IN_PROGRESS;
}

View File

@ -249,8 +249,8 @@ public int run(Configuration conf, List<String> args) throws IOException {
}
/** Command to schedule blocks to move based on specified policy. */
private static class SatisfyStoragePolicyCommand implements
AdminHelper.Command {
private static class SatisfyStoragePolicyCommand
implements AdminHelper.Command {
@Override
public String getName() {
return "-satisfyStoragePolicy";
@ -294,10 +294,11 @@ public int run(Configuration conf, List<String> args) throws IOException {
}
/** Command to check storage policy satisfier status. */
private static class IsSPSRunningCommand implements AdminHelper.Command {
private static class IsSatisfierRunningCommand
implements AdminHelper.Command {
@Override
public String getName() {
return "-isSPSRunning";
return "-isSatisfierRunning";
}
@Override
@ -386,6 +387,6 @@ public int run(Configuration conf, List<String> args) throws IOException {
new GetStoragePolicyCommand(),
new UnsetStoragePolicyCommand(),
new SatisfyStoragePolicyCommand(),
new IsSPSRunningCommand()
new IsSatisfierRunningCommand()
};
}

View File

@ -4496,13 +4496,13 @@
</property>
<property>
<name>dfs.storage.policy.satisfier.activate</name>
<value>true</value>
<name>dfs.storage.policy.satisfier.enabled</name>
<value>false</value>
<description>
If true, StoragePolicySatisfier will be started along with active namenode.
By default, StoragePolicySatisfier is activated.
Administrator can dynamically activate or deactivate StoragePolicySatisfier by using reconfiguration option.
Dynamic activation/deactivation option can be achieved in the following way.
By default, StoragePolicySatisfier is disabled.
Administrator can dynamically enable or disable StoragePolicySatisfier by using reconfiguration option.
Dynamic enabling/disabling option can be achieved in the following way.
1. Edit/update this configuration property values in hdfs-site.xml
2. Execute the reconfig command on hadoop command line prompt.
For example:$hdfs -reconfig namenode nn_host:port start

View File

@ -108,7 +108,7 @@ Following 2 options will allow users to move the blocks based on new policy set.
When user changes the storage policy on a file/directory, user can call `HdfsAdmin` API `satisfyStoragePolicy()` to move the blocks as per the new policy set.
The SPS daemon thread runs along with namenode and periodically scans for the storage mismatches between new policy set and the physical blocks placed. This will only track the files/directories for which user invoked satisfyStoragePolicy. If SPS identifies some blocks to be moved for a file, then it will schedule block movement tasks to datanodes. A Coordinator DataNode(C-DN) will track all block movements associated to a file and notify the namenode about movement success/failure. If there are any failures in movement, the SPS will re-attempt by sending a new block movement task.
SPS can be activated and deactivated dynamically without restarting the Namenode.
SPS can be enabled and disabled dynamically without restarting the Namenode.
Detailed design documentation can be found at [Storage Policy Satisfier(SPS) (HDFS-10285)](https://issues.apache.org/jira/browse/HDFS-10285)
@ -125,8 +125,8 @@ Detailed design documentation can be found at [Storage Policy Satisfier(SPS) (HD
####Configurations:
* **dfs.storage.policy.satisfier.activate** - Used to activate or deactivate SPS. Configuring true represents SPS is
activated and vice versa.
* **dfs.storage.policy.satisfier.enabled** - Used to enable or disable SPS. Configuring true represents SPS is
enabled and vice versa.
* **dfs.storage.policy.satisfier.recheck.timeout.millis** - A timeout to re-check the processed block storage movement
command results from Co-ordinator Datanode.
@ -153,7 +153,7 @@ Note that, when both -p and -f options are omitted, the default path is the root
####Administrator notes:
`StoragePolicySatisfier` and `Mover tool` cannot run simultaneously. If a Mover instance is already triggered and running, SPS will be deactivated while starting. In that case, administrator should make sure, Mover execution finished and then activate SPS again. Similarly when SPS activated already, Mover cannot be run. If administrator is looking to run Mover tool explicitly, then he/she should make sure to deactivate SPS first and then run Mover. Please look at the commands section to know how to activate or deactivate SPS dynamically.
`StoragePolicySatisfier` and `Mover tool` cannot run simultaneously. If a Mover instance is already triggered and running, SPS will be disabled while starting. In that case, the administrator should make sure the Mover execution has finished and then enable SPS again. Similarly, when SPS is already enabled, Mover cannot be run. If the administrator is looking to run the Mover tool explicitly, then he/she should make sure to disable SPS first and then run Mover. Please look at the commands section to know how to enable or disable SPS dynamically.
Storage Policy Commands
-----------------------
@ -232,10 +232,10 @@ Check the running status of Storage Policy Satisfier in namenode. If it is runni
* Command:
hdfs storagepolicies -isSPSRunning
hdfs storagepolicies -isSatisfierRunning
### Activate or Deactivate SPS without restarting Namenode
If administrator wants to activate or deactivate SPS feature while Namenode is running, first he/she needs to update the desired value(true or false) for the configuration item `dfs.storage.policy.satisfier.activate` in configuration file (`hdfs-site.xml`) and then run the following Namenode reconfig command
### Enable or Disable SPS without restarting Namenode
If administrator wants to enable or disable SPS feature while Namenode is running, first he/she needs to update the desired value(true or false) for the configuration item `dfs.storage.policy.satisfier.enabled` in configuration file (`hdfs-site.xml`) and then run the following Namenode reconfig command
+ hdfs dfsadmin -reconfig namenode <host:ipc_port> start

View File

@ -616,7 +616,7 @@ Usage:
[-getStoragePolicy -path <path>]
[-unsetStoragePolicy -path <path>]
[-satisfyStoragePolicy -path <path>]
[-isSPSRunning]
[-isSatisfierRunning]
[-help <command-name>]
Lists out all/Gets/sets/unsets storage policies. See the [HDFS Storage Policy Documentation](./ArchivalStorage.html) for more information.

View File

@ -69,7 +69,7 @@ private static void initConf(Configuration conf) {
conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
1L);
conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
true);
}

View File

@ -115,7 +115,7 @@ static void initConf(Configuration conf) {
1L);
conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
conf.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
}
static Mover newMover(Configuration conf) throws IOException {
@ -137,7 +137,7 @@ static Mover newMover(Configuration conf) throws IOException {
public void testScheduleSameBlock() throws IOException {
final Configuration conf = new HdfsConfiguration();
conf.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(4).build();
try {
@ -462,7 +462,7 @@ private void checkMovePaths(List<Path> actual, Path... expected) {
public void testMoverCli() throws Exception {
final Configuration clusterConf = new HdfsConfiguration();
clusterConf.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
final MiniDFSCluster cluster = new MiniDFSCluster
.Builder(clusterConf).numDataNodes(0).build();
try {
@ -497,7 +497,7 @@ public void testMoverCli() throws Exception {
public void testMoverCliWithHAConf() throws Exception {
final Configuration conf = new HdfsConfiguration();
conf.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
final MiniDFSCluster cluster = new MiniDFSCluster
.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology())
@ -522,14 +522,14 @@ public void testMoverCliWithHAConf() throws Exception {
public void testMoverCliWithFederation() throws Exception {
final Configuration clusterConf = new HdfsConfiguration();
clusterConf.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
final MiniDFSCluster cluster = new MiniDFSCluster
.Builder(clusterConf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(3))
.numDataNodes(0).build();
final Configuration conf = new HdfsConfiguration();
conf.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
DFSTestUtil.setFederatedConfiguration(cluster, conf);
try {
Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
@ -575,14 +575,14 @@ public void testMoverCliWithFederation() throws Exception {
public void testMoverCliWithFederationHA() throws Exception {
final Configuration clusterConf = new HdfsConfiguration();
clusterConf.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
final MiniDFSCluster cluster = new MiniDFSCluster
.Builder(clusterConf)
.nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(3))
.numDataNodes(0).build();
final Configuration conf = new HdfsConfiguration();
conf.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
DFSTestUtil.setFederatedHAConfiguration(cluster, conf);
try {
Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
@ -647,7 +647,7 @@ public void testMoveWhenStoragePolicyNotSatisfying() throws Exception {
// HDFS-8147
final Configuration conf = new HdfsConfiguration();
conf.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(3)
.storageTypes(
@ -677,7 +677,7 @@ public void testMoveWhenStoragePolicyNotSatisfying() throws Exception {
public void testMoveWhenStoragePolicySatisfierIsRunning() throws Exception {
final Configuration conf = new HdfsConfiguration();
conf.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, true);
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, true);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(3)
.storageTypes(
@ -800,7 +800,7 @@ void initConfWithStripe(Configuration conf) {
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
false);
conf.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
}
@Test(timeout = 300000)

View File

@ -97,7 +97,7 @@ public class TestStorageMover {
DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 2L);
DEFAULT_CONF.setLong(DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY, 2000L);
DEFAULT_CONF.setBoolean(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
DEFAULT_POLICIES = BlockStoragePolicySuite.createDefaultSuite();
HOT = DEFAULT_POLICIES.getPolicy(HdfsConstants.HOT_STORAGE_POLICY_NAME);

View File

@ -47,7 +47,7 @@ public void setup() throws Exception {
@After
public void teardown() {
if (bsmAttemptedItems != null) {
bsmAttemptedItems.deactivate();
bsmAttemptedItems.stop();
bsmAttemptedItems.stopGracefully();
}
}

View File

@ -32,6 +32,7 @@
import org.apache.hadoop.conf.ReconfigurationException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
@ -44,8 +45,8 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT;
@ -223,63 +224,99 @@ public void testReconfigureHearbeatCheck() throws ReconfigurationException {
}
/**
* Tests activate/deactivate Storage Policy Satisfier dynamically.
* Tests enable/disable Storage Policy Satisfier dynamically when
* "dfs.storage.policy.enabled" feature is disabled.
*
* @throws ReconfigurationException
* @throws IOException
*/
@Test(timeout = 30000)
public void testReconfigureStoragePolicySatisfierActivated()
public void testReconfigureSPSWithStoragePolicyDisabled()
throws ReconfigurationException, IOException {
// shutdown cluster
cluster.shutdown();
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, false);
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
final NameNode nameNode = cluster.getNameNode();
verifySPSEnabled(nameNode, DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
// enable SPS
nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
"true");
// Since DFS_STORAGE_POLICY_ENABLED_KEY is disabled, SPS can't be enabled.
assertEquals("SPS shouldn't start as "
+ DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY + " is disabled", false,
nameNode.getNamesystem().getBlockManager()
.isStoragePolicySatisfierRunning());
assertEquals(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY + " has wrong value",
true, nameNode.getConf()
.getBoolean(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT));
}
/**
* Tests enable/disable Storage Policy Satisfier dynamically.
*/
@Test(timeout = 30000)
public void testReconfigureStoragePolicySatisfierEnabled()
throws ReconfigurationException {
final NameNode nameNode = cluster.getNameNode();
verifySPSActivated(nameNode, DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
true);
verifySPSEnabled(nameNode, DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
false);
// try invalid values
try {
nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
"text");
fail("ReconfigurationException expected");
} catch (ReconfigurationException e) {
GenericTestUtils.assertExceptionContains(
"For activating or deactivating storage policy satisfier, "
"For enabling or disabling storage policy satisfier, "
+ "we must pass true/false only",
e.getCause());
}
// enable SPS
nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
"true");
verifySPSActivated(nameNode, DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
verifySPSEnabled(nameNode, DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
true);
// disable SPS
nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
"false");
verifySPSActivated(nameNode, DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
verifySPSEnabled(nameNode, DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
false);
// revert to default
nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
// enable SPS
nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
"true");
assertEquals(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY + " has wrong value",
assertEquals(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY + " has wrong value",
true, nameNode.getNamesystem().getBlockManager()
.isStoragePolicySatisfierRunning());
assertEquals(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY + " has wrong value",
assertEquals(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY + " has wrong value",
true, nameNode.getConf()
.getBoolean(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false));
.getBoolean(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false));
}
/**
* Test to satisfy storage policy after deactivating storage policy satisfier.
* Test to satisfy storage policy after disabling the storage policy satisfier.
*/
@Test(timeout = 30000)
public void testSatisfyStoragePolicyAfterSatisfierDeactivated()
public void testSatisfyStoragePolicyAfterSatisfierDisabled()
throws ReconfigurationException, IOException {
final NameNode nameNode = cluster.getNameNode();
// deactivate SPS
nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
// disable SPS
nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
"false");
verifySPSActivated(nameNode, DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
verifySPSEnabled(nameNode, DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
false);
Path filePath = new Path("/testSPS");
@ -288,32 +325,32 @@ public void testSatisfyStoragePolicyAfterSatisfierDeactivated()
fileSystem.setStoragePolicy(filePath, "COLD");
try {
fileSystem.satisfyStoragePolicy(filePath);
fail("Expected to fail, as storage policy feature has deactivated.");
fail("Expected to fail, as storage policy feature has disabled.");
} catch (RemoteException e) {
GenericTestUtils
.assertExceptionContains("Cannot request to satisfy storage policy "
+ "when storage policy satisfier feature has been deactivated"
+ " by admin. Seek for an admin help to activate it "
+ "when storage policy satisfier feature has been disabled"
+ " by admin. Seek for an admin help to enable it "
+ "or use Mover tool.", e);
}
// revert to default
nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
"true");
assertEquals(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY + " has wrong value",
assertEquals(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY + " has wrong value",
true, nameNode.getNamesystem().getBlockManager()
.isStoragePolicySatisfierRunning());
assertEquals(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY + " has wrong value",
assertEquals(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY + " has wrong value",
true, nameNode.getConf()
.getBoolean(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false));
.getBoolean(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false));
}
void verifySPSActivated(final NameNode nameNode, String property,
void verifySPSEnabled(final NameNode nameNode, String property,
boolean expected) {
assertEquals(property + " has wrong value", expected, nameNode
.getNamesystem().getBlockManager().isStoragePolicySatisfierRunning());
assertEquals(property + " has wrong value", expected, nameNode.getConf()
.getBoolean(property, DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT));
.getBoolean(property, DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT));
}
@Test

View File

@ -96,6 +96,8 @@ private void clusterSetUp(boolean isHAEnabled, Configuration newConf)
conf.set(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
"3000");
conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
true);
final int dnNumber = storageTypes.length;
final short replication = 3;
MiniDFSCluster.Builder clusterBuilder = new MiniDFSCluster.Builder(conf)
@ -282,6 +284,8 @@ public void testWithFederationHA() throws Exception {
MiniDFSCluster haCluster = null;
try {
conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
true);
haCluster = new MiniDFSCluster
.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(2))
@ -376,7 +380,7 @@ public void testDropSPS() throws Exception {
fs.setStoragePolicy(testFile, ONE_SSD);
fs.satisfyStoragePolicy(testFile);
cluster.getNamesystem().getBlockManager().deactivateSPS();
cluster.getNamesystem().getBlockManager().disableSPS();
// Make sure satisfy xattr has been removed.
DFSTestUtil.waitForXattrRemoved(testFileName,

View File

@ -96,6 +96,8 @@ private void shutdownCluster() {
private void createCluster() throws IOException {
config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
true);
hdfsCluster = startCluster(config, allDiskTypes, numOfDatanodes,
storagesPerDatanode, capacity);
dfs = hdfsCluster.getFileSystem();
@ -522,7 +524,7 @@ public void testWhenMoverIsAlreadyRunningBeforeStoragePolicySatisfier()
createCluster();
// Stop SPS
hdfsCluster.getNameNode().reconfigurePropertyImpl(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "false");
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, "false");
running = hdfsCluster.getFileSystem()
.getClient().isStoragePolicySatisfierRunning();
Assert.assertFalse("SPS should stopped as configured.", running);
@ -533,7 +535,7 @@ public void testWhenMoverIsAlreadyRunningBeforeStoragePolicySatisfier()
// Restart SPS
hdfsCluster.getNameNode().reconfigurePropertyImpl(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "true");
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, "true");
running = hdfsCluster.getFileSystem()
.getClient().isStoragePolicySatisfierRunning();
@ -548,7 +550,7 @@ public void testWhenMoverIsAlreadyRunningBeforeStoragePolicySatisfier()
// Restart SPS again
hdfsCluster.getNameNode().reconfigurePropertyImpl(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "true");
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, "true");
running = hdfsCluster.getFileSystem()
.getClient().isStoragePolicySatisfierRunning();
Assert.assertTrue("SPS should be running as "
@ -558,7 +560,7 @@ public void testWhenMoverIsAlreadyRunningBeforeStoragePolicySatisfier()
doTestWhenStoragePolicySetToCOLD();
} catch (ReconfigurationException e) {
throw new IOException("Exception when reconfigure "
+ DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, e);
+ DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, e);
} finally {
if (out != null) {
out.close();
@ -599,6 +601,8 @@ public void testWhenMoverExitsWithoutDeleteMoverIDFile()
@Test(timeout = 120000)
public void testMoveWithBlockPinning() throws Exception {
config.setBoolean(DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED, true);
config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
true);
hdfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(3)
.storageTypes(
new StorageType[][] {{StorageType.DISK, StorageType.DISK},
@ -663,6 +667,8 @@ public void testWhenOnlyFewSourceNodesHaveMatchingTargetNodes()
try {
int numOfDns = 5;
config.setLong("dfs.block.size", 1024);
config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
true);
allDiskTypes =
new StorageType[][]{{StorageType.DISK, StorageType.ARCHIVE},
{StorageType.DISK, StorageType.DISK},
@ -707,6 +713,8 @@ public void testBlockMoveInSameDatanodeWithONESSD() throws Exception {
{StorageType.DISK, StorageType.SSD},
{StorageType.DISK, StorageType.RAM_DISK}};
config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
true);
try {
hdfsCluster = startCluster(config, diskTypes, numOfDatanodes,
storagesPerDatanode, capacity);
@ -746,6 +754,8 @@ public void testBlockMoveInSameAndRemoteDatanodesWithWARM() throws Exception {
{StorageType.DISK, StorageType.DISK}};
config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
true);
try {
hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
storagesPerDatanode, capacity);
@ -782,6 +792,8 @@ public void testSPSWhenReplicaWithExpectedStorageAlreadyAvailableInSource()
{StorageType.DISK, StorageType.ARCHIVE}};
try {
config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
true);
hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
storagesPerDatanode, capacity);
dfs = hdfsCluster.getFileSystem();
@ -825,6 +837,8 @@ public void testChooseInSameDatanodeWithONESSDShouldNotChooseIfNoSpace()
{StorageType.DISK, StorageType.SSD},
{StorageType.DISK, StorageType.DISK}};
config.setLong("dfs.block.size", 2 * DEFAULT_BLOCK_SIZE);
config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
true);
long dnCapacity = 1024 * DEFAULT_BLOCK_SIZE + (2 * DEFAULT_BLOCK_SIZE - 1);
try {
hdfsCluster = startCluster(config, diskTypes, numOfDatanodes,
@ -915,7 +929,8 @@ public void testSPSShouldNotLeakXattrIfSatisfyStoragePolicyCallOnECFiles()
1L);
config.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
false);
config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
true);
try {
hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
storagesPerDatanode, capacity);
@ -968,8 +983,10 @@ public void testSPSShouldNotLeakXattrIfSatisfyStoragePolicyCallOnECFiles()
public void testSPSWhenFileLengthIsZero() throws Exception {
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(0)
.build();
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
Path filePath = new Path("/zeroSizeFile");
@ -1006,6 +1023,8 @@ public void testSPSWhenFileHasLowRedundancyBlocks() throws Exception {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
true);
conf.set(DFSConfigKeys
.DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
"3000");
@ -1054,6 +1073,8 @@ public void testSPSWhenFileHasExcessRedundancyBlocks() throws Exception {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
true);
conf.set(DFSConfigKeys
.DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
"3000");

View File

@ -65,6 +65,8 @@ public class TestStoragePolicySatisfierWithHA {
private void createCluster() throws IOException {
config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
true);
startCluster(config, allDiskTypes, numOfDatanodes, storagesPerDatanode,
capacity);
dfs = cluster.getFileSystem(nnIndex);
@ -131,15 +133,15 @@ public void testWhenNNHAStateChanges() throws IOException {
try {
cluster.getNameNode(0).reconfigurePropertyImpl(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "false");
Assert.fail("It's not allowed to activate or deactivate"
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, "false");
Assert.fail("It's not allowed to enable or disable"
+ " StoragePolicySatisfier on Standby NameNode");
} catch (ReconfigurationException e) {
GenericTestUtils.assertExceptionContains("Could not change property "
+ DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY
+ DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY
+ " from 'true' to 'false'", e);
GenericTestUtils.assertExceptionContains(
"Activating or deactivating storage policy satisfier service on "
"Enabling or disabling storage policy satisfier service on "
+ "standby NameNode is not allowed", e.getCause());
}
} finally {

View File

@ -103,6 +103,8 @@ public void testMoverWithFullStripe() throws Exception {
}
final Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
true);
conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
StripedFileTestUtil.getDefaultECPolicy().getName());
initConfWithStripe(conf, defaultStripeBlockSize);
@ -215,6 +217,8 @@ public void testWhenOnlyFewTargetNodesAreAvailableToSatisfyStoragePolicy()
final Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
StripedFileTestUtil.getDefaultECPolicy().getName());
conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
true);
initConfWithStripe(conf, defaultStripeBlockSize);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numOfDatanodes)
@ -325,6 +329,8 @@ public void testSPSWhenFileHasLowRedundancyBlocks() throws Exception {
"3000");
conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
StripedFileTestUtil.getDefaultECPolicy().getName());
conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
true);
initConfWithStripe(conf, defaultStripeBlockSize);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numOfDatanodes)
@ -415,6 +421,8 @@ public void testWhenNoTargetDatanodeToSatisfyStoragePolicy()
final Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
StripedFileTestUtil.getDefaultECPolicy().getName());
conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
true);
initConfWithStripe(conf, defaultStripeBlockSize);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numOfDatanodes)

View File

@ -48,6 +48,8 @@ public class TestStoragePolicyCommands {
@Before
public void clusterSetUp() throws IOException, URISyntaxException {
conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
true);
StorageType[][] newtypes = new StorageType[][] {
{StorageType.ARCHIVE, StorageType.DISK}};
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL)
@ -164,7 +166,7 @@ public void testSetAndGetStoragePolicy() throws Exception {
"File/Directory does not exist: /fooz");
}
@Test
@Test(timeout = 30000)
public void testStoragePolicySatisfierCommand() throws Exception {
final String file = "/testStoragePolicySatisfierCommand";
DFSTestUtil.createFile(fs, new Path(file), SIZE, REPL, 0);
@ -185,18 +187,21 @@ public void testStoragePolicySatisfierCommand() throws Exception {
fs);
}
@Test
public void testIsSPSRunningCommand() throws Exception {
final String file = "/testIsSPSRunningCommand";
@Test(timeout = 30000)
public void testIsSatisfierRunningCommand() throws Exception {
final String file = "/testIsSatisfierRunningCommand";
DFSTestUtil.createFile(fs, new Path(file), SIZE, REPL, 0);
final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
DFSTestUtil.toolRun(admin, "-isSPSRunning", 0, "yes");
DFSTestUtil.toolRun(admin, "-isSatisfierRunning", 0, "yes");
cluster.getNameNode().reconfigureProperty(
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "false");
DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, "false");
cluster.waitActive();
DFSTestUtil.toolRun(admin, "-isSPSRunning", 0, "no");
DFSTestUtil.toolRun(admin, "-isSatisfierRunning", 0, "no");
// Test with unnecessary args
DFSTestUtil.toolRun(admin, "-isSPSRunning status", 1,
DFSTestUtil.toolRun(admin, "-isSatisfierRunning status", 1,
"Can't understand arguments: ");
}
}