HDDS-253. SCMBlockDeletingService should publish events for delete blocks to EventQueue. Contributed by Lokesh Jain.

This commit is contained in:
Nanda kumar 2018-07-13 17:18:42 +05:30
parent 3f3f72221f
commit 1fe5b93843
5 changed files with 18 additions and 12 deletions

View File

@@ -28,6 +28,7 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.BlockID;
@@ -87,10 +88,12 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean {
* @param conf - configuration. * @param conf - configuration.
* @param nodeManager - node manager. * @param nodeManager - node manager.
* @param containerManager - container manager. * @param containerManager - container manager.
* @param eventPublisher - event publisher.
* @throws IOException * @throws IOException
*/ */
public BlockManagerImpl(final Configuration conf, public BlockManagerImpl(final Configuration conf,
final NodeManager nodeManager, final Mapping containerManager) final NodeManager nodeManager, final Mapping containerManager,
EventPublisher eventPublisher)
throws IOException { throws IOException {
this.nodeManager = nodeManager; this.nodeManager = nodeManager;
this.containerManager = containerManager; this.containerManager = containerManager;
@@ -120,9 +123,8 @@ public BlockManagerImpl(final Configuration conf,
OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT, OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS); TimeUnit.MILLISECONDS);
blockDeletingService = blockDeletingService =
new SCMBlockDeletingService( new SCMBlockDeletingService(deletedBlockLog, containerManager,
deletedBlockLog, containerManager, nodeManager, svcInterval, nodeManager, eventPublisher, svcInterval, serviceTimeout, conf);
serviceTimeout, conf);
} }
/** /**

View File

@@ -20,11 +20,14 @@
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.container.Mapping; import org.apache.hadoop.hdds.scm.container.Mapping;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
import org.apache.hadoop.hdds.protocol.proto import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Time;
import org.apache.hadoop.utils.BackgroundService; import org.apache.hadoop.utils.BackgroundService;
@@ -61,6 +64,7 @@ public class SCMBlockDeletingService extends BackgroundService {
private final DeletedBlockLog deletedBlockLog; private final DeletedBlockLog deletedBlockLog;
private final Mapping mappingService; private final Mapping mappingService;
private final NodeManager nodeManager; private final NodeManager nodeManager;
private final EventPublisher eventPublisher;
// Block delete limit size is dynamically calculated based on container // Block delete limit size is dynamically calculated based on container
// delete limit size (ozone.block.deleting.container.limit.per.interval) // delete limit size (ozone.block.deleting.container.limit.per.interval)
@@ -76,13 +80,14 @@ public class SCMBlockDeletingService extends BackgroundService {
private int blockDeleteLimitSize; private int blockDeleteLimitSize;
public SCMBlockDeletingService(DeletedBlockLog deletedBlockLog, public SCMBlockDeletingService(DeletedBlockLog deletedBlockLog,
Mapping mapper, NodeManager nodeManager, Mapping mapper, NodeManager nodeManager, EventPublisher eventPublisher,
long interval, long serviceTimeout, Configuration conf) { long interval, long serviceTimeout, Configuration conf) {
super("SCMBlockDeletingService", interval, TimeUnit.MILLISECONDS, super("SCMBlockDeletingService", interval, TimeUnit.MILLISECONDS,
BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout); BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout);
this.deletedBlockLog = deletedBlockLog; this.deletedBlockLog = deletedBlockLog;
this.mappingService = mapper; this.mappingService = mapper;
this.nodeManager = nodeManager; this.nodeManager = nodeManager;
this.eventPublisher = eventPublisher;
int containerLimit = conf.getInt( int containerLimit = conf.getInt(
OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL,
@@ -145,8 +150,8 @@ public EmptyTaskResult call() throws Exception {
// We should stop caching new commands if num of un-processed // We should stop caching new commands if num of un-processed
// command is bigger than a limit, e.g 50. In case datanode goes // command is bigger than a limit, e.g 50. In case datanode goes
// offline for sometime, the cached commands be flooded. // offline for sometime, the cached commands be flooded.
nodeManager.addDatanodeCommand(dnId, eventPublisher.fireEvent(SCMEvents.DATANODE_COMMAND,
new DeleteBlocksCommand(dnTXs)); new CommandForDatanode<>(dnId, new DeleteBlocksCommand(dnTXs)));
LOG.debug( LOG.debug(
"Added delete block command for datanode {} in the queue," "Added delete block command for datanode {} in the queue,"
+ " number of delete block transactions: {}, TxID list: {}", + " number of delete block transactions: {}, TxID list: {}",

View File

@@ -181,7 +181,7 @@ private StorageContainerManager(OzoneConfiguration conf) throws IOException {
scmContainerManager = new ContainerMapping( scmContainerManager = new ContainerMapping(
conf, getScmNodeManager(), cacheSize); conf, getScmNodeManager(), cacheSize);
scmBlockManager = new BlockManagerImpl( scmBlockManager = new BlockManagerImpl(
conf, getScmNodeManager(), scmContainerManager); conf, getScmNodeManager(), scmContainerManager, eventQueue);
Node2ContainerMap node2ContainerMap = new Node2ContainerMap(); Node2ContainerMap node2ContainerMap = new Node2ContainerMap();

View File

@@ -74,7 +74,7 @@ public static void setUp() throws Exception {
} }
nodeManager = new MockNodeManager(true, 10); nodeManager = new MockNodeManager(true, 10);
mapping = new ContainerMapping(conf, nodeManager, 128); mapping = new ContainerMapping(conf, nodeManager, 128);
blockManager = new BlockManagerImpl(conf, nodeManager, mapping); blockManager = new BlockManagerImpl(conf, nodeManager, mapping, null);
if(conf.getBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, if(conf.getBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT)){ ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT)){
factor = HddsProtos.ReplicationFactor.THREE; factor = HddsProtos.ReplicationFactor.THREE;

View File

@@ -17,7 +17,6 @@
*/ */
package org.apache.hadoop.ozone.scm; package org.apache.hadoop.ozone.scm;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -117,7 +116,7 @@ public void setup() throws Exception {
nodeManager = cluster.getStorageContainerManager().getScmNodeManager(); nodeManager = cluster.getStorageContainerManager().getScmNodeManager();
mapping = new ContainerMapping(conf, nodeManager, 128); mapping = new ContainerMapping(conf, nodeManager, 128);
blockManager = new BlockManagerImpl(conf, nodeManager, mapping); blockManager = new BlockManagerImpl(conf, nodeManager, mapping, null);
// blockManager.allocateBlock() will create containers if there is none // blockManager.allocateBlock() will create containers if there is none
// stored in levelDB. The number of containers to create is the value of // stored in levelDB. The number of containers to create is the value of