HDFS-5520. loading cache path directives from edit log doesn't update nextEntryId (cmccabe)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1543286 13f79535-47bb-0310-9956-ffa450edef68
commit 4f15d0af4f
parent 4341562622
CHANGES.txt:
@@ -375,6 +375,9 @@ Trunk (Unreleased)
 
     HDFS-5320. Add datanode caching metrics. (wang)
 
+    HDFS-5520. loading cache path directives from edit log doesn't update
+    nextEntryId (cmccabe)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
CacheManager.java:
@@ -249,7 +249,7 @@ public GSet<CachedBlock, CachedBlock> getCachedBlocks() {
 
   private long getNextEntryId() throws IOException {
     assert namesystem.hasWriteLock();
-    if (nextEntryId == Long.MAX_VALUE) {
+    if (nextEntryId >= Long.MAX_VALUE - 1) {
       throw new IOException("No more available IDs.");
     }
     return nextEntryId++;
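A note on the changed bound: the old check only tripped once nextEntryId had crept up to Long.MAX_VALUE by repeated increments. The replay path added in the next hunk can now jump nextEntryId forward in a single step, and addDirective treats Long.MAX_VALUE itself as an invalid ID, so the guard becomes >= Long.MAX_VALUE - 1. A minimal standalone sketch of the allocator semantics after this hunk (the wrapper class is hypothetical, and the starting value of 1 is an assumption; CacheManager's field initializer is not in this diff):

    import java.io.IOException;

    class EntryIdAllocator {
      // Assumed starting value; not shown in this diff.
      private long nextEntryId = 1;

      // Mirrors CacheManager#getNextEntryId after the patch: >= keeps the
      // guard correct even when replay sets nextEntryId directly instead of
      // incrementing it, and Long.MAX_VALUE is never handed out because
      // addDirective rejects IDs that large.
      long getNextEntryId() throws IOException {
        if (nextEntryId >= Long.MAX_VALUE - 1) {
          throw new IOException("No more available IDs.");
        }
        return nextEntryId++;
      }
    }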
@@ -357,6 +357,17 @@ public PathBasedCacheDirective addDirective(
       // We are loading an entry from the edit log.
       // Use the ID from the edit log.
       id = directive.getId();
+      if (id <= 0) {
+        throw new InvalidRequestException("can't add an ID " +
+            "of " + id + ": it is not positive.");
+      }
+      if (id >= Long.MAX_VALUE) {
+        throw new InvalidRequestException("can't add an ID " +
+            "of " + id + ": it is too big.");
+      }
+      if (nextEntryId <= id) {
+        nextEntryId = id + 1;
+      }
     } else {
       // Add a new entry with the next available ID.
       id = getNextEntryId();
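This hunk is the heart of HDFS-5520. Before it, replaying an add-directive op from the edit log stored the directive under its logged ID but never advanced nextEntryId, so after a NameNode restart the allocator could hand out IDs that were already taken. A minimal sketch of the replay path in isolation (the wrapper class is hypothetical; the checks mirror the hunk above):

    class ReplaySketch {
      private long nextEntryId = 1;  // assumed starting value

      // Edit-log replay hands us a directive that already carries an ID.
      // Validate it, then make sure the allocator can never reissue it.
      void acceptReplayedId(long id) {
        if (id <= 0 || id >= Long.MAX_VALUE) {
          throw new IllegalArgumentException("can't add an ID of " + id);
        }
        if (nextEntryId <= id) {
          nextEntryId = id + 1;  // the actual fix: advance past every replayed ID
        }
      }
    }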
TestFsDatasetCache.java:
@@ -51,6 +51,7 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetCache.PageRounder;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.MappableBlock;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.BlockIdCommand;
@@ -91,6 +92,10 @@ public class TestFsDatasetCache {
   private static PageRounder rounder = new PageRounder();
   private static CacheManipulator prevCacheManipulator;
 
+  static {
+    EditLogFileOutputStream.setShouldSkipFsyncForTesting(false);
+  }
+
   @Before
   public void setUp() throws Exception {
     assumeTrue(!Path.WINDOWS);
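Both test classes pin edit-log fsync on via a static initializer. The flag is JVM-global and must be set before the first edit log output stream is opened, which is why it cannot go in @Before; presumably it overrides a skip-fsync default used elsewhere in the test suite, so that the edits written before the restart in testCacheManagerRestart are really on disk and get replayed. A sketch of the restart pattern this enables (MiniDFSCluster.restartNameNode() is a real method; the helper around it is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;

    class RestartSketch {
      static {
        // Set once, before any NameNode starts in this JVM.
        EditLogFileOutputStream.setShouldSkipFsyncForTesting(false);
      }

      // Restarting the NameNode forces a full edit-log replay, which is the
      // code path the HDFS-5520 fix changes.
      static void restartAndReplay(MiniDFSCluster cluster) throws IOException {
        cluster.restartNameNode();
      }
    }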
TestPathBasedCacheRequests.java:
@@ -58,6 +58,7 @@
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.MappableBlock;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -85,6 +86,10 @@ public class TestPathBasedCacheRequests {
   static private NamenodeProtocols proto;
   static private CacheManipulator prevCacheManipulator;
 
+  static {
+    EditLogFileOutputStream.setShouldSkipFsyncForTesting(false);
+  }
+
   @Before
   public void setup() throws Exception {
     conf = new HdfsConfiguration();
@@ -510,8 +515,9 @@ public void testCacheManagerRestart() throws Exception {
     // Create some cache entries
     int numEntries = 10;
     String entryPrefix = "/party-";
+    long prevId = -1;
     for (int i=0; i<numEntries; i++) {
-      dfs.addPathBasedCacheDirective(
+      prevId = dfs.addPathBasedCacheDirective(
           new PathBasedCacheDirective.Builder().
             setPath(new Path(entryPrefix + i)).setPool(pool).build());
     }
@@ -549,6 +555,11 @@ public void testCacheManagerRestart() throws Exception {
       assertEquals(pool, cd.getPool());
     }
     assertFalse("Unexpected # of cache directives found", dit.hasNext());
+
+    long nextId = dfs.addPathBasedCacheDirective(
+        new PathBasedCacheDirective.Builder().
+          setPath(new Path("/foobar")).setPool(pool).build());
+    assertEquals(prevId + 1, nextId);
   }
 
   /**
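The strengthened test now pins down the ID sequence across a restart: ten directives are created (the last returned ID recorded in prevId), the NameNode restarts and replays them from the edit log, and a fresh directive must then receive prevId + 1 rather than recycle an ID already in use. A self-contained toy model of the regression, not HDFS code, contrasting pre- and post-fix behavior:

    // Toy model: replay() is the edit-log path, create() a fresh directive.
    class NextEntryIdModel {
      long nextEntryId = 1;
      final boolean fixed;

      NextEntryIdModel(boolean fixed) { this.fixed = fixed; }

      void replay(long id) {
        if (fixed && nextEntryId <= id) {
          nextEntryId = id + 1;  // the HDFS-5520 fix
        }
      }

      long create() { return nextEntryId++; }

      public static void main(String[] args) {
        NextEntryIdModel buggy = new NextEntryIdModel(false);
        NextEntryIdModel ok = new NextEntryIdModel(true);
        for (long id = 1; id <= 10; id++) { buggy.replay(id); ok.replay(id); }
        System.out.println(buggy.create());  // 1: collides with a replayed ID
        System.out.println(ok.create());     // 11: matches assertEquals(prevId + 1, nextId)
      }
    }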
|
Loading…
Reference in New Issue
Block a user