HDDS-667. Fix TestOzoneFileInterfaces. Contributed by Mukul Kumar Singh.

Jitendra Pandey 2018-10-16 10:34:16 -07:00
parent 25f8fcb064
commit 53e5173bd1
2 changed files with 4 additions and 2 deletions

@@ -53,6 +53,8 @@
 public class BlockManagerImpl implements BlockManager {
   static final Logger LOG = LoggerFactory.getLogger(BlockManagerImpl.class);
+  private static byte[] blockCommitSequenceIdKey =
+      DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX);
   private Configuration config;
@@ -89,8 +91,6 @@ public long putBlock(Container container, BlockData data) throws IOException {
     Preconditions.checkNotNull(db, "DB cannot be null here");
     long blockCommitSequenceId = data.getBlockCommitSequenceId();
-    byte[] blockCommitSequenceIdKey =
-        DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX);
     byte[] blockCommitSequenceIdValue = db.get(blockCommitSequenceIdKey);
     // default blockCommitSequenceId for any block is 0. It the putBlock
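The BlockManagerImpl hunks above move the blockCommitSequenceIdKey byte array out of putBlock() and into a static field, so the key bytes are built once per class load instead of on every putBlock() call, and the lookup in putBlock() simply reuses the shared key. The sketch below only illustrates that pattern under simplified assumptions: the class name, the key constant's value, the KeyValueStore interface, and the comparison logic are stand-ins, not the actual Ozone code.

```java
import java.nio.charset.StandardCharsets;

// Illustrative sketch of the refactoring in BlockManagerImpl: a key that never
// changes is promoted from a per-call local variable to a static field.
// All names below are stand-ins, not the real Ozone classes.
public class BlockManagerSketch {

  // Stand-in for OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX.
  private static final String BLOCK_COMMIT_SEQUENCE_ID_PREFIX = "#BCSID";

  // Computed once, mirroring the new static blockCommitSequenceIdKey field.
  private static final byte[] BLOCK_COMMIT_SEQUENCE_ID_KEY =
      BLOCK_COMMIT_SEQUENCE_ID_PREFIX.getBytes(StandardCharsets.UTF_8);

  // Minimal stand-in for the container key/value store used in putBlock().
  interface KeyValueStore {
    byte[] get(byte[] key);
    void put(byte[] key, byte[] value);
  }

  public void putBlock(KeyValueStore db, long blockCommitSequenceId) {
    // Before this commit the key bytes were rebuilt here on every call;
    // now the shared static key is reused. A block that has never been
    // committed defaults to sequence id 0.
    byte[] storedValue = db.get(BLOCK_COMMIT_SEQUENCE_ID_KEY);
    long storedId = storedValue == null
        ? 0L
        : Long.parseLong(new String(storedValue, StandardCharsets.UTF_8));
    if (blockCommitSequenceId > storedId) {
      db.put(BLOCK_COMMIT_SEQUENCE_ID_KEY,
          Long.toString(blockCommitSequenceId)
              .getBytes(StandardCharsets.UTF_8));
    }
  }
}
```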

@@ -35,6 +35,7 @@
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.common.Storage.StorageState;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.hdds.scm.server.SCMStorage;
@@ -283,6 +284,7 @@ public void shutdown() {
               scm.getClientProtocolServer().getScmInfo().getClusterId()));
       stop();
       FileUtils.deleteDirectory(baseDir);
+      ContainerCache.getInstance(conf).shutdownCache();
     } catch (IOException e) {
       LOG.error("Exception while shutting down the cluster.", e);
     }
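The second changed file adds ContainerCache.getInstance(conf).shutdownCache() to the cluster's shutdown() path, right after the cluster is stopped and its base directory is deleted. ContainerCache.getInstance(...) hands out a shared, process-wide cache, so without this call, entries cached while one cluster was running would presumably remain visible to the next cluster started in the same JVM and could point at directories that have since been deleted. The following is a hypothetical, simplified singleton cache (the class and member names are illustrative, not the real ContainerCache) showing why an explicit shutdownCache() in teardown matters.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical, simplified stand-in for a JVM-wide cache such as ContainerCache.
// All names here are illustrative only.
public final class SimpleContainerCache {

  private static final SimpleContainerCache INSTANCE = new SimpleContainerCache();

  // Cached handles outlive any single mini-cluster instance because the cache
  // itself is a static singleton.
  private final Map<String, AutoCloseable> handles = new ConcurrentHashMap<>();

  private SimpleContainerCache() {
  }

  public static SimpleContainerCache getInstance() {
    return INSTANCE;
  }

  public void put(String containerPath, AutoCloseable dbHandle) {
    handles.put(containerPath, dbHandle);
  }

  // Without an explicit shutdown in test teardown, handles cached by a
  // previous cluster (possibly pointing at now-deleted directories) would
  // still be returned to the next cluster started in the same JVM.
  public void shutdownCache() {
    handles.forEach((path, handle) -> {
      try {
        handle.close();
      } catch (Exception e) {
        // best-effort close during teardown
      }
    });
    handles.clear();
  }
}
```

In teardown this mirrors the new line in shutdown() above: stop the cluster, delete its base directory, then clear the process-wide cache so the next test starts clean.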