HDFS-8229. LAZY_PERSIST file gets deleted after NameNode restart. (Contributed by Surendra Singh Lilhore)

Arpit Agarwal 2015-05-01 16:30:51 -07:00
parent 7d46a806e7
commit 6f541edce0
4 changed files with 43 additions and 1 deletion

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -601,6 +601,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8276. LazyPersistFileScrubber should be disabled if scrubber interval
configured zero. (Surendra Singh Lilhore via Arpit Agarwal)
HDFS-8229. LAZY_PERSIST file gets deleted after NameNode restart.
(Surendra Singh Lilhore via Arpit Agarwal)
Release 2.7.1 - UNRELEASED
INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -4767,7 +4767,14 @@ private void clearCorruptLazyPersistFiles()
public void run() {
while (fsRunning && shouldRun) {
try {
if (!isInSafeMode()) {
clearCorruptLazyPersistFiles();
} else {
if (FSNamesystem.LOG.isDebugEnabled()) {
FSNamesystem.LOG
.debug("Namenode is in safemode, skipping scrubbing of corrupted lazy-persist files.");
}
}
Thread.sleep(scrubIntervalSec * 1000);
} catch (InterruptedException e) {
FSNamesystem.LOG.info(
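
For readers skimming the hunk above, the scrubber loop after this change reads roughly as follows. This is a sketch reassembled from the visible diff lines, not a verbatim copy of FSNamesystem: fsRunning, shouldRun and scrubIntervalSec are existing fields of the enclosing LazyPersistFileScrubber, and the interrupt handler's log message is truncated in the hunk, so only its shape is shown.

public void run() {
  while (fsRunning && shouldRun) {
    try {
      if (!isInSafeMode()) {
        // Block reports are in, so a replica the NameNode cannot find really is lost.
        clearCorruptLazyPersistFiles();
      } else {
        // Right after a restart the NameNode sits in safemode until enough replicas
        // are reported; scrubbing now would delete healthy LAZY_PERSIST files.
        if (FSNamesystem.LOG.isDebugEnabled()) {
          FSNamesystem.LOG.debug(
              "Namenode is in safemode, skipping scrubbing of corrupted lazy-persist files.");
        }
      }
      Thread.sleep(scrubIntervalSec * 1000);
    } catch (InterruptedException e) {
      // The committed handler (message truncated above) logs the interruption and
      // the loop exits.
      break;
    }
  }
}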

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java

@@ -258,6 +258,7 @@ protected final void startUpCluster(
LAZY_WRITER_INTERVAL_SEC);
conf.setLong(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES,
evictionLowWatermarkReplicas * BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
if (useSCR) {
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
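
The new line above pins dfs.namenode.safemode.min.datanodes at 1 for every test built on this base class, so a NameNode restarted while all DataNodes are down stays in safemode and the scrubber guard introduced in FSNamesystem takes effect. As a rough illustration only (not part of this patch), a test could exercise the same guard without a restart by holding the NameNode in safemode explicitly; DistributedFileSystem.setSafeMode is an existing HDFS API, while the helper method below and its use of the cluster fixture are hypothetical.

import java.io.IOException;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

// Hypothetical test helper: run a check while the NameNode is pinned in safemode,
// which is the state in which LazyPersistFileScrubber now skips its passes.
private void runWhileInSafemode(Runnable check) throws IOException {
  DistributedFileSystem dfs = cluster.getFileSystem();
  dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  try {
    check.run();
  } finally {
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);  // normal scrubbing resumes
  }
}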

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java

@@ -16,6 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import com.google.common.collect.Iterators;
import com.google.common.util.concurrent.Uninterruptibles;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.Path;
@@ -145,6 +146,36 @@ public void testDisableLazyPersistFileScrubber()
Assert.assertTrue(fs.exists(path1));
}
/**
* If the NN is restarted, then lazyPersist files should not be deleted.
*/
@Test
public void testFileShouldNotDiscardedIfNNRestarted() throws IOException,
InterruptedException {
getClusterBuilder().setRamDiskReplicaCapacity(2).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
makeTestFile(path1, BLOCK_SIZE, true);
ensureFileReplicasOnStorageType(path1, RAM_DISK);
cluster.shutdownDataNodes();
cluster.restartNameNodes();
// wait for the replication monitor to mark the file as corrupt
Thread.sleep(2 * DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT * 1000);
Long corruptBlkCount = (long) Iterators.size(cluster.getNameNode()
.getNamesystem().getBlockManager().getCorruptReplicaBlockIterator());
// Check that the block was detected as corrupt
assertThat(corruptBlkCount, is(1L));
// Ensure path1 still exists.
Assert.assertTrue(fs.exists(path1));
}
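
The sleep-then-assert pattern above depends on the restarted NameNode still being in safemode when the scrubber wakes up (no DataNode has re-registered, and the base class now requires at least one before safemode can be left). A possible extra guard, sketched here and not part of the committed test, would make that assumption explicit using objects the test already has:

// Hypothetical assertion: the restarted NameNode should still be in safemode,
// which is what keeps the scrubber from deleting path1.
Assert.assertTrue("Expected NameNode to remain in safemode after restart",
    cluster.getNameNode().getNamesystem().isInSafeMode());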
/**
* Concurrent read from the same node and verify the contents.
*/