HDFS-13257. Code cleanup: INode never throws QuotaExceededException. Contributed by Tsz Wo Nicholas Sze.
commit 4c57fb0cd9
parent a82be7754d
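Before the hunks, a minimal sketch of the API change this cleanup makes. The method names mirror the diff below; the class names, fields, and the simplified propagation logic are illustrative stand-ins only, not the real HDFS implementation:

```java
// Illustrative stand-ins only -- the real INode/INodeDirectory classes carry far more state.
class QuotaCounts {
  // namespace/storagespace/storagetype deltas (details elided)
}

abstract class INodeSketch {
  protected INodeSketch parent;

  // Old contract (removed by this commit):
  //   public void addSpaceConsumed(QuotaCounts counts, boolean verify)
  //       throws QuotaExceededException
  // New contract: counts are simply propagated up the tree, with no checked exception.
  public void addSpaceConsumed(QuotaCounts counts) {
    if (parent != null) {
      parent.addSpaceConsumed(counts);
    }
  }
}

class INodeDirectorySketch extends INodeSketch {
  private boolean quotaSet;

  @Override
  public void addSpaceConsumed(QuotaCounts counts) {
    super.addSpaceConsumed(counts);   // update ancestors first
    if (quotaSet) {
      // cache-only bookkeeping here; quota verification happens earlier in the
      // callers (see the addLastINode hunk, which keeps
      // updateCount(existing, pos, counts, checkQuota) before addChild)
    }
  }
}
```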
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import org.apache.hadoop.hdfs.util.EnumCounters;
-
 /**
  * The content types such as file, directory and symlink to be computed.
  */
@@ -39,34 +37,4 @@ public enum Content {
   SNAPSHOT,
   /** The number of snapshottable directories. */
   SNAPSHOTTABLE_DIRECTORY;
-
-  /** Content counts. */
-  public static class Counts extends EnumCounters<Content> {
-    public static Counts newInstance() {
-      return new Counts();
-    }
-
-    private Counts() {
-      super(Content.class);
-    }
-  }
-
-  private static final EnumCounters.Factory<Content, Counts> FACTORY
-      = new EnumCounters.Factory<Content, Counts>() {
-    @Override
-    public Counts newInstance() {
-      return Counts.newInstance();
-    }
-  };
-
-  /** A map of counters for the current state and the snapshots. */
-  public static class CountsMap
-      extends EnumCounters.Map<CountsMap.Key, Content, Counts> {
-    /** The key type of the map. */
-    public enum Key { CURRENT, SNAPSHOT }
-
-    CountsMap() {
-      super(FACTORY);
-    }
-  }
 }
@@ -146,25 +146,6 @@ private void checkStoragespace(final INodeDirectory dir, final long computed) {
     }
   }
 
-  void addSpaceConsumed(final INodeDirectory dir, final QuotaCounts counts,
-      boolean verify) throws QuotaExceededException {
-    if (dir.isQuotaSet()) {
-      // The following steps are important:
-      // check quotas in this inode and all ancestors before changing counts
-      // so that no change is made if there is any quota violation.
-      // (1) verify quota in this inode
-      if (verify) {
-        verifyQuota(counts);
-      }
-      // (2) verify quota and then add count in ancestors
-      dir.addSpaceConsumed2Parent(counts, verify);
-      // (3) add count in this inode
-      addSpaceConsumed2Cache(counts);
-    } else {
-      dir.addSpaceConsumed2Parent(counts, verify);
-    }
-  }
-
   /** Update the space/namespace/type usage of the tree
    *
    * @param delta the change of the namespace/space/type usage
@@ -689,13 +689,13 @@ INodesInPath addSourceToDestination() {
       return fsd.addLastINodeNoQuotaCheck(dstParentIIP, toDst);
     }
 
-    void updateMtimeAndLease(long timestamp) throws QuotaExceededException {
+    void updateMtimeAndLease(long timestamp) {
       srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshotId());
       final INode dstParent = dstParentIIP.getLastINode();
       dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshotId());
     }
 
-    void restoreSource() throws QuotaExceededException {
+    void restoreSource() {
       // Rename failed - restore src
       final INode oldSrcChild = srcChild;
       // put it back
@@ -722,7 +722,7 @@ void restoreSource() throws QuotaExceededException {
       }
     }
 
-    void restoreDst(BlockStoragePolicySuite bsps) throws QuotaExceededException {
+    void restoreDst(BlockStoragePolicySuite bsps) {
       Preconditions.checkState(oldDstChild != null);
       final INodeDirectory dstParent = dstParentIIP.getLastINode().asDirectory();
       if (dstParent.isWithSnapshot()) {
@@ -738,8 +738,8 @@ void restoreDst(BlockStoragePolicySuite bsps) throws QuotaExceededException {
       }
     }
 
-    boolean cleanDst(BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks)
-        throws QuotaExceededException {
+    boolean cleanDst(
+        BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks) {
       Preconditions.checkState(oldDstChild != null);
       List<INode> removedINodes = new ChunkedArrayList<>();
       List<Long> removedUCFiles = new ChunkedArrayList<>();
@@ -762,13 +762,13 @@ boolean cleanDst(BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBloc
       return filesDeleted;
     }
 
-    void updateQuotasInSourceTree(BlockStoragePolicySuite bsps) throws QuotaExceededException {
+    void updateQuotasInSourceTree(BlockStoragePolicySuite bsps) {
       // update the quota usage in src tree
       if (isSrcInSnapshot) {
         // get the counts after rename
         QuotaCounts newSrcCounts = srcChild.computeQuotaUsage(bsps, false);
         newSrcCounts.subtract(oldSrcCounts);
-        srcParent.addSpaceConsumed(newSrcCounts, false);
+        srcParent.addSpaceConsumed(newSrcCounts);
       }
     }
   }
@@ -1293,13 +1293,8 @@ public INodesInPath addLastINode(INodesInPath existing, INode inode,
     updateCount(existing, pos, counts, checkQuota);
 
     boolean isRename = (inode.getParent() != null);
-    boolean added;
-    try {
-      added = parent.addChild(inode, true, existing.getLatestSnapshotId());
-    } catch (QuotaExceededException e) {
-      updateCountNoQuotaCheck(existing, pos, counts.negation());
-      throw e;
-    }
+    final boolean added = parent.addChild(inode, true,
+        existing.getLatestSnapshotId());
     if (!added) {
       updateCountNoQuotaCheck(existing, pos, counts.negation());
       return null;
@@ -39,7 +39,6 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
@@ -471,21 +470,10 @@ public abstract ContentSummaryComputationContext computeContentSummary(
 
   /**
    * Check and add namespace/storagespace/storagetype consumed to itself and the ancestors.
-   * @throws QuotaExceededException if quote is violated.
    */
-  public void addSpaceConsumed(QuotaCounts counts, boolean verify)
-    throws QuotaExceededException {
-    addSpaceConsumed2Parent(counts, verify);
-  }
-
-  /**
-   * Check and add namespace/storagespace/storagetype consumed to itself and the ancestors.
-   * @throws QuotaExceededException if quote is violated.
-   */
-  void addSpaceConsumed2Parent(QuotaCounts counts, boolean verify)
-    throws QuotaExceededException {
+  public void addSpaceConsumed(QuotaCounts counts) {
     if (parent != null) {
-      parent.addSpaceConsumed(counts, verify);
+      parent.addSpaceConsumed(counts);
     }
   }
 
@@ -31,7 +31,6 @@
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
@@ -171,13 +170,12 @@ public QuotaCounts getQuotaCounts() {
   }
 
   @Override
-  public void addSpaceConsumed(QuotaCounts counts, boolean verify)
-    throws QuotaExceededException {
+  public void addSpaceConsumed(QuotaCounts counts) {
+    super.addSpaceConsumed(counts);
+
     final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
-    if (q != null) {
-      q.addSpaceConsumed(this, counts, verify);
-    } else {
-      addSpaceConsumed2Parent(counts, verify);
+    if (q != null && isQuotaSet()) {
+      q.addSpaceConsumed2Cache(counts);
     }
   }
 
@@ -281,7 +279,7 @@ public void setSnapshotQuota(int snapshotQuota) {
   public Snapshot addSnapshot(int id, String name,
       final LeaseManager leaseManager, final boolean captureOpenFiles,
       int maxSnapshotLimit)
-      throws SnapshotException, QuotaExceededException {
+      throws SnapshotException {
     return getDirectorySnapshottableFeature().addSnapshot(this, id, name,
         leaseManager, captureOpenFiles, maxSnapshotLimit);
   }
@@ -543,7 +541,7 @@ public boolean removeChild(final INode child) {
    * otherwise, return true;
    */
   public boolean addChild(INode node, final boolean setModTime,
-      final int latestSnapshotId) throws QuotaExceededException {
+      final int latestSnapshotId) {
     final int low = searchChildren(node.getLocalNameBytes());
     if (low >= 0) {
       return false;
@@ -739,10 +737,9 @@ protected ContentSummaryComputationContext computeDirectoryContentSummary(
    *          The reference node to be removed/replaced
    * @param newChild
    *          The node to be added back
-   * @throws QuotaExceededException should not throw this exception
    */
   public void undoRename4ScrParent(final INodeReference oldChild,
-      final INode newChild) throws QuotaExceededException {
+      final INode newChild) {
     DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
     assert sf != null : "Directory does not have snapshot feature";
     sf.getDiffs().removeDeletedChild(oldChild);
@@ -756,8 +753,7 @@ public void undoRename4ScrParent(final INodeReference oldChild,
    * and delete possible record in the deleted list.
    */
   public void undoRename4DstParent(final BlockStoragePolicySuite bsps,
-      final INode deletedChild,
-      int latestSnapshotId) throws QuotaExceededException {
+      final INode deletedChild, int latestSnapshotId) {
     DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
     assert sf != null : "Directory does not have snapshot feature";
     boolean removeDeletedChild = sf.getDiffs().removeDeletedChild(deletedChild);
@@ -767,8 +763,7 @@ public void undoRename4DstParent(final BlockStoragePolicySuite bsps,
     // been stored in deleted list before
     if (added && !removeDeletedChild) {
       final QuotaCounts counts = deletedChild.computeQuotaUsage(bsps);
-      addSpaceConsumed(counts, false);
-
+      addSpaceConsumed(counts);
     }
   }
 
@@ -30,7 +30,6 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
-import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.Content;
@@ -171,7 +170,7 @@ void addSnapshot(Snapshot snapshot) {
   public Snapshot addSnapshot(INodeDirectory snapshotRoot, int id, String name,
       final LeaseManager leaseManager, final boolean captureOpenFiles,
       int maxSnapshotLimit)
-      throws SnapshotException, QuotaExceededException {
+      throws SnapshotException {
     //check snapshot quota
     final int n = getNumSnapshots();
     if (n + 1 > snapshotQuota) {
@@ -19,7 +19,6 @@
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.*;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
@@ -524,7 +523,7 @@ public void getSnapshotDirectory(List<INodeDirectory> snapshotDir) {
    * to make sure that parent is in the given snapshot "latest".
    */
   public boolean addChild(INodeDirectory parent, INode inode,
-      boolean setModTime, int latestSnapshotId) throws QuotaExceededException {
+      boolean setModTime, int latestSnapshotId) {
     ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latestSnapshotId,
         parent).diff;
     final int undoInfo = diff.create(inode);
@@ -120,7 +120,7 @@ public void loadINodeReferenceSection(InputStream in) throws IOException {
     }
 
     private INodeReference loadINodeReference(
-        INodeReferenceSection.INodeReference r) throws IOException {
+        INodeReferenceSection.INodeReference r) {
       long referredId = r.getReferredId();
       INode referred = fsDir.getInode(referredId);
       WithCount withCount = (WithCount) referred.getParentReference();
@@ -17,12 +17,11 @@
  */
 package org.apache.hadoop.hdfs.util;
 
-import java.util.Arrays;
-import java.util.HashMap;
-
 import com.google.common.base.Preconditions;
 import org.apache.commons.lang.ArrayUtils;
 
+import java.util.Arrays;
+
 /**
  * Counters for an enum type.
  *
@@ -31,7 +30,7 @@
  * enum Fruit { APPLE, ORANGE, GRAPE }
  * </pre>
  * An {@link EnumCounters} object can be created for counting the numbers of
- * APPLE, ORANGLE and GRAPE.
+ * APPLE, ORANGE and GRAPE.
  *
  * @param <E> the enum type
  */
@@ -178,69 +177,4 @@ public boolean anyGreaterOrEqual(long val) {
     }
     return false;
   }
-
-  /**
-   * A factory for creating counters.
-   *
-   * @param <E> the enum type
-   * @param <C> the counter type
-   */
-  public static interface Factory<E extends Enum<E>,
-      C extends EnumCounters<E>> {
-    /** Create a new counters instance. */
-    public C newInstance();
-  }
-
-  /**
-   * A key-value map which maps the keys to {@link EnumCounters}.
-   * Note that null key is supported.
-   *
-   * @param <K> the key type
-   * @param <E> the enum type
-   * @param <C> the counter type
-   */
-  public static class Map<K, E extends Enum<E>, C extends EnumCounters<E>> {
-    /** The factory for creating counters. */
-    private final Factory<E, C> factory;
-    /** Key-to-Counts map. */
-    private final java.util.Map<K, C> counts = new HashMap<K, C>();
-
-    /** Construct a map. */
-    public Map(final Factory<E, C> factory) {
-      this.factory = factory;
-    }
-
-    /** @return the counters for the given key. */
-    public final C getCounts(final K key) {
-      C c = counts.get(key);
-      if (c == null) {
-        c = factory.newInstance();
-        counts.put(key, c);
-      }
-      return c;
-    }
-
-    /** @return the sum of the values of all the counters. */
-    public final C sum() {
-      final C sum = factory.newInstance();
-      for(C c : counts.values()) {
-        sum.add(c);
-      }
-      return sum;
-    }
-
-    /** @return the sum of the values of all the counters for e. */
-    public final long sum(final E e) {
-      long sum = 0;
-      for(C c : counts.values()) {
-        sum += c.get(e);
-      }
-      return sum;
-    }
-
-    @Override
-    public String toString() {
-      return counts.toString();
-    }
-  }
 }
@@ -132,7 +132,7 @@ private static void resetStream() {
   }
 
   @AfterClass
-  public static void tearDownClass() throws Exception {
+  public static void tearDownClass() {
     try {
       System.out.flush();
      System.err.flush();
@@ -170,7 +170,7 @@ private void runCommand(DFSAdmin admin, String args[], boolean expectEror)
    * @throws Exception
    */
   @Test
-  public void testDSQuotaExceededExceptionIsHumanReadable() throws Exception {
+  public void testDSQuotaExceededExceptionIsHumanReadable() {
     Integer bytes = 1024;
     try {
       throw new DSQuotaExceededException(bytes, bytes);