HDFS-8229. LAZY_PERSIST file gets deleted after NameNode restart. (Contributed by Surendra Singh Lilhore)
parent 7d46a806e7
commit 6f541edce0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -601,6 +601,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8276. LazyPersistFileScrubber should be disabled if scrubber interval
     configured zero. (Surendra Singh Lilhore via Arpit Agarwal)
 
+    HDFS-8229. LAZY_PERSIST file gets deleted after NameNode restart.
+    (Surendra Singh Lilhore via Arpit Agarwal)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4767,7 +4767,14 @@ private void clearCorruptLazyPersistFiles()
       public void run() {
         while (fsRunning && shouldRun) {
           try {
-            clearCorruptLazyPersistFiles();
+            if (!isInSafeMode()) {
+              clearCorruptLazyPersistFiles();
+            } else {
+              if (FSNamesystem.LOG.isDebugEnabled()) {
+                FSNamesystem.LOG
+                    .debug("Namenode is in safemode, skipping scrubbing of corrupted lazy-persist files.");
+              }
+            }
             Thread.sleep(scrubIntervalSec * 1000);
           } catch (InterruptedException e) {
             FSNamesystem.LOG.info(
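The root cause of HDFS-8229: after a NameNode restart, the LazyPersistFileScrubber could run before any DataNode had re-registered, so every lazy-persist replica looked lost and the scrubber deleted the files. The fix above gates the destructive pass on safe mode. Below is a minimal, self-contained sketch of that guard pattern; GuardedScrubber and its constructor arguments are illustrative names for this sketch, not HDFS APIs.

import java.util.function.BooleanSupplier;

// Sketch of the guard pattern used in the hunk above: a periodic scrubber
// that skips its destructive pass while the system reports safe mode.
public class GuardedScrubber implements Runnable {
  private final BooleanSupplier inSafeMode;  // e.g. namesystem::isInSafeMode
  private final Runnable scrubPass;          // e.g. clearCorruptLazyPersistFiles
  private final long intervalMillis;
  private volatile boolean shouldRun = true;

  public GuardedScrubber(BooleanSupplier inSafeMode, Runnable scrubPass,
                         long intervalMillis) {
    this.inSafeMode = inSafeMode;
    this.scrubPass = scrubPass;
    this.intervalMillis = intervalMillis;
  }

  public void stop() {
    shouldRun = false;
  }

  @Override
  public void run() {
    while (shouldRun) {
      try {
        if (!inSafeMode.getAsBoolean()) {
          // Only delete state once the block map is complete; in safe mode a
          // "corrupt" lazy-persist block may just be an unreported replica.
          scrubPass.run();
        }
        Thread.sleep(intervalMillis);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();  // restore the interrupt flag
        shouldRun = false;
      }
    }
  }
}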
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
@@ -258,6 +258,7 @@ protected final void startUpCluster(
         LAZY_WRITER_INTERVAL_SEC);
     conf.setLong(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES,
         evictionLowWatermarkReplicas * BLOCK_SIZE);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
 
     if (useSCR) {
       conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
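A note on the configuration line added here: dfs.namenode.safemode.min.datanodes (default 0) keeps the NameNode in safe mode until at least that many DataNodes have registered. Setting it to 1 in the test harness guarantees a window in which the NameNode is up but no DataNode has reported in, which is exactly the window the scrubber guard above protects. A small standalone sketch, using only stock Hadoop classes:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

// Sketch: pin the NameNode in safe mode until one DataNode registers.
public class SafeModeMinDataNodesExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // The default is 0, i.e. the NN may leave safe mode with no live DataNodes.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
    System.out.println("dfs.namenode.safemode.min.datanodes = "
        + conf.getInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 0));
  }
}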
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
+import com.google.common.collect.Iterators;
 import com.google.common.util.concurrent.Uninterruptibles;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.Path;
@@ -145,6 +146,36 @@ public void testDisableLazyPersistFileScrubber()
     Assert.assertTrue(fs.exists(path1));
 
   }
+
+  /**
+   * If the NN is restarted, then lazyPersist files should not be deleted.
+   */
+  @Test
+  public void testFileShouldNotDiscardedIfNNRestarted() throws IOException,
+      InterruptedException {
+    getClusterBuilder().setRamDiskReplicaCapacity(2).build();
+    final String METHOD_NAME = GenericTestUtils.getMethodName();
+    Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
+    makeTestFile(path1, BLOCK_SIZE, true);
+    ensureFileReplicasOnStorageType(path1, RAM_DISK);
+
+    cluster.shutdownDataNodes();
+
+    cluster.restartNameNodes();
+
+    // Wait for the replication monitor to mark the file as corrupt.
+    Thread.sleep(2 * DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT * 1000);
+
+    Long corruptBlkCount = (long) Iterators.size(cluster.getNameNode()
+        .getNamesystem().getBlockManager().getCorruptReplicaBlockIterator());
+
+    // Check that the block was detected as corrupted.
+    assertThat(corruptBlkCount, is(1L));
+
+    // Ensure path1 still exists.
+    Assert.assertTrue(fs.exists(path1));
+  }
+
   /**
    * Concurrent read from the same node and verify the contents.
    */
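The new test reproduces the bug end to end: the single RAM_DISK replica disappears when the DataNodes shut down, the restarted NameNode reports exactly one corrupt lazy-persist block (hence the assertThat on a count of 1), yet the file itself survives because the scrubber now skips its pass while the NameNode sits in safe mode waiting for a DataNode. Assuming the test lands in TestLazyPersistFiles as shown, it can be run on its own from hadoop-hdfs-project/hadoop-hdfs with the standard Surefire single-test invocation:

    mvn test -Dtest=TestLazyPersistFiles#testFileShouldNotDiscardedIfNNRestarted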