HDFS-2053. Bug in INodeDirectory#computeContentSummary warning. Contributed by Michael Noll
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1140707 13f79535-47bb-0310-9956-ffa450edef68
commit 1834fb99f5
parent 7663caab5a
CHANGES.txt
@@ -788,6 +788,9 @@ Trunk (unreleased changes)
     HDFS-1381. HDFS javadocs hard-code references to dfs.namenode.name.dir and
     dfs.datanode.data.dir parameters (Jim Plush via atm)
 
+    HDFS-2053. Bug in INodeDirectory#computeContentSummary warning.
+    (Michael Noll via eli)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES
INodeDirectory.java
@@ -409,20 +409,31 @@ DirCounts spaceConsumedInTree(DirCounts counts) {
 
   /** {@inheritDoc} */
   long[] computeContentSummary(long[] summary) {
+    // Walk through the children of this node, using a new summary array
+    // for the (sub)tree rooted at this node
+    assert 4 == summary.length;
+    long[] subtreeSummary = new long[]{0,0,0,0};
     if (children != null) {
       for (INode child : children) {
-        child.computeContentSummary(summary);
+        child.computeContentSummary(subtreeSummary);
       }
     }
     if (this instanceof INodeDirectoryWithQuota) {
       // Warn if the cached and computed diskspace values differ
       INodeDirectoryWithQuota node = (INodeDirectoryWithQuota)this;
       long space = node.diskspaceConsumed();
-      if (-1 != node.getDsQuota() && space != summary[3]) {
+      assert -1 == node.getDsQuota() || space == subtreeSummary[3];
+      if (-1 != node.getDsQuota() && space != subtreeSummary[3]) {
         NameNode.LOG.warn("Inconsistent diskspace for directory "
-          +getLocalName()+". Cached: "+space+" Computed: "+summary[3]);
+          +getLocalName()+". Cached: "+space+" Computed: "+subtreeSummary[3]);
       }
     }
+
+    // update the passed summary array with the values for this node's subtree
+    for (int i = 0; i < summary.length; i++) {
+      summary[i] += subtreeSummary[i];
+    }
+
     summary[2]++;
     return summary;
   }
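For context, a minimal standalone sketch of the accumulation pattern this hunk introduces (hypothetical Node/Dir/FileNode types, not the real INode hierarchy): each directory totals its own subtree into a fresh array, compares the cached diskspace against that subtree-local value, and only then folds the result into the caller's running summary, so the consistency check no longer sees counts carried over from siblings visited earlier.

    import java.util.ArrayList;
    import java.util.List;

    // Sketch only, not part of the commit. Summary layout mirrors the code
    // above: [2] = directories, [3] = diskspace; treating [0] as length and
    // [1] as file count is an assumption for this sketch.
    abstract class Node {
      abstract long[] computeContentSummary(long[] summary);
    }

    class FileNode extends Node {
      final long length;
      FileNode(long length) { this.length = length; }

      @Override
      long[] computeContentSummary(long[] summary) {
        summary[0] += length;   // bytes
        summary[1]++;           // one file
        summary[3] += length;   // diskspace (replication ignored in this sketch)
        return summary;
      }
    }

    class Dir extends Node {
      final List<Node> children = new ArrayList<>();
      long cachedDiskspace;     // stands in for diskspaceConsumed()

      @Override
      long[] computeContentSummary(long[] summary) {
        // Accumulate this subtree into a fresh array so the check below
        // compares the cache against this directory's subtree only, not
        // totals carried over from siblings that were visited earlier.
        long[] subtree = new long[]{0, 0, 0, 0};
        for (Node child : children) {
          child.computeContentSummary(subtree);
        }
        if (cachedDiskspace != subtree[3]) {
          System.err.println("Inconsistent diskspace: cached=" + cachedDiskspace
              + " computed=" + subtree[3]);
        }
        // Fold the subtree totals back into the caller's running summary.
        for (int i = 0; i < summary.length; i++) {
          summary[i] += subtree[i];
        }
        summary[2]++;           // count this directory itself
        return summary;
      }
    }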
TestQuota.java
@@ -712,7 +712,57 @@ public void testSpaceCommands() throws Exception {
       // verify increase in space
       c = dfs.getContentSummary(dstPath);
       assertEquals(c.getSpaceConsumed(), 5 * fileSpace + file2Len);
+
+      // Test HDFS-2053 :
+
+      // Create directory /hdfs-2053
+      final Path quotaDir2053 = new Path("/hdfs-2053");
+      assertTrue(dfs.mkdirs(quotaDir2053));
+
+      // Create subdirectories /hdfs-2053/{A,B,C}
+      final Path quotaDir2053_A = new Path(quotaDir2053, "A");
+      assertTrue(dfs.mkdirs(quotaDir2053_A));
+      final Path quotaDir2053_B = new Path(quotaDir2053, "B");
+      assertTrue(dfs.mkdirs(quotaDir2053_B));
+      final Path quotaDir2053_C = new Path(quotaDir2053, "C");
+      assertTrue(dfs.mkdirs(quotaDir2053_C));
+
+      // Factors to vary the sizes of test files created in each subdir.
+      // The actual factors are not really important but they allow us to create
+      // identifiable file sizes per subdir, which helps during debugging.
+      int sizeFactorA = 1;
+      int sizeFactorB = 2;
+      int sizeFactorC = 4;
+
+      // Set space quota for subdirectory C
+      dfs.setQuota(quotaDir2053_C, FSConstants.QUOTA_DONT_SET,
+          (sizeFactorC + 1) * fileSpace);
+      c = dfs.getContentSummary(quotaDir2053_C);
+      assertEquals(c.getSpaceQuota(), (sizeFactorC + 1) * fileSpace);
+
+      // Create a file under subdirectory A
+      DFSTestUtil.createFile(dfs, new Path(quotaDir2053_A, "fileA"),
+          sizeFactorA * fileLen, replication, 0);
+      c = dfs.getContentSummary(quotaDir2053_A);
+      assertEquals(c.getSpaceConsumed(), sizeFactorA * fileSpace);
+
+      // Create a file under subdirectory B
+      DFSTestUtil.createFile(dfs, new Path(quotaDir2053_B, "fileB"),
+          sizeFactorB * fileLen, replication, 0);
+      c = dfs.getContentSummary(quotaDir2053_B);
+      assertEquals(c.getSpaceConsumed(), sizeFactorB * fileSpace);
+
+      // Create a file under subdirectory C (which has a space quota)
+      DFSTestUtil.createFile(dfs, new Path(quotaDir2053_C, "fileC"),
+          sizeFactorC * fileLen, replication, 0);
+      c = dfs.getContentSummary(quotaDir2053_C);
+      assertEquals(c.getSpaceConsumed(), sizeFactorC * fileSpace);
+
+      // Check space consumed for /hdfs-2053
+      c = dfs.getContentSummary(quotaDir2053);
+      assertEquals(c.getSpaceConsumed(),
+          (sizeFactorA + sizeFactorB + sizeFactorC) * fileSpace);
+
     } finally {
       cluster.shutdown();
     }
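As a rough check of the arithmetic the new assertions encode (assuming, as in the earlier portion of testSpaceCommands not shown in this hunk, that fileSpace is fileLen multiplied by the replication factor): subdirectory C stores one file of sizeFactorC * fileLen bytes, so it consumes 4 * fileSpace, which stays under its quota of (sizeFactorC + 1) * fileSpace = 5 * fileSpace, and the parent /hdfs-2053 should report (1 + 2 + 4) * fileSpace in total. With the old code, the quota directory's cached-vs-computed comparison used the shared summary array and therefore also saw the totals already accumulated for siblings A and B, which is exactly the spurious "Inconsistent diskspace" warning this scenario exercises.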