HDFS-14206. RBF: Cleanup quota modules. Contributed by Inigo Goiri.

Authored by Yiqun Lin on 2019-01-15 14:21:33 +08:00; committed by Brahma Reddy Battula
parent f4e2bfce58
commit 221f24cbdc
5 changed files with 38 additions and 33 deletions

@@ -163,7 +163,7 @@ private QuotaUsage aggregateQuota(Map<RemoteLocation, QuotaUsage> results) {
long ssCount = 0;
long nsQuota = HdfsConstants.QUOTA_RESET;
long ssQuota = HdfsConstants.QUOTA_RESET;
-boolean hasQuotaUnSet = false;
+boolean hasQuotaUnset = false;
for (Map.Entry<RemoteLocation, QuotaUsage> entry : results.entrySet()) {
RemoteLocation loc = entry.getKey();
@@ -172,7 +172,7 @@ private QuotaUsage aggregateQuota(Map<RemoteLocation, QuotaUsage> results) {
// If quota is not set in real FileSystem, the usage
// value will return -1.
if (usage.getQuota() == -1 && usage.getSpaceQuota() == -1) {
-hasQuotaUnSet = true;
+hasQuotaUnset = true;
}
nsQuota = usage.getQuota();
ssQuota = usage.getSpaceQuota();
@@ -189,7 +189,7 @@ private QuotaUsage aggregateQuota(Map<RemoteLocation, QuotaUsage> results) {
QuotaUsage.Builder builder = new QuotaUsage.Builder()
.fileAndDirectoryCount(nsCount).spaceConsumed(ssCount);
-if (hasQuotaUnSet) {
+if (hasQuotaUnset) {
builder.quota(HdfsConstants.QUOTA_RESET)
.spaceQuota(HdfsConstants.QUOTA_RESET);
} else {

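Note: the aggregation hunks above sum namespace and space usage across subclusters and, when any subcluster reports an unset quota (-1 from the underlying FileSystem), publish QUOTA_RESET for the merged quota. Below is a minimal, self-contained sketch of that rule; the class name is invented for illustration, and the body of the truncated else branch (keeping the last subcluster's quota values) is an assumption, since the hunk cuts off there.

import java.util.Collection;

import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

/** Illustrative only; not the Router's actual aggregation class. */
final class QuotaAggregationSketch {

  static QuotaUsage aggregate(Collection<QuotaUsage> subclusterUsages) {
    long nsCount = 0;
    long ssCount = 0;
    long nsQuota = HdfsConstants.QUOTA_RESET;
    long ssQuota = HdfsConstants.QUOTA_RESET;
    boolean hasQuotaUnset = false;

    for (QuotaUsage usage : subclusterUsages) {
      // A real FileSystem reports -1 when no quota has been set on the path.
      if (usage.getQuota() == -1 && usage.getSpaceQuota() == -1) {
        hasQuotaUnset = true;
      }
      nsQuota = usage.getQuota();
      ssQuota = usage.getSpaceQuota();
      nsCount += usage.getFileAndDirectoryCount();
      ssCount += usage.getSpaceConsumed();
    }

    QuotaUsage.Builder builder = new QuotaUsage.Builder()
        .fileAndDirectoryCount(nsCount).spaceConsumed(ssCount);
    if (hasQuotaUnset) {
      builder.quota(HdfsConstants.QUOTA_RESET)
          .spaceQuota(HdfsConstants.QUOTA_RESET);
    } else {
      // Assumed else branch: keep the last subcluster's quota values.
      builder.quota(nsQuota).spaceQuota(ssQuota);
    }
    return builder.build();
  }
}
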
@@ -20,7 +20,7 @@
import static org.apache.hadoop.hdfs.server.federation.router.FederationUtil.updateMountPointStatus;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
-import org.apache.hadoop.fs.BatchedRemoteIterator;
+import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
@@ -1147,7 +1147,7 @@ public void removeCacheDirective(long id) throws IOException {
}
@Override
-public BatchedRemoteIterator.BatchedEntries<CacheDirectiveEntry> listCacheDirectives(
+public BatchedEntries<CacheDirectiveEntry> listCacheDirectives(
long prevId, CacheDirectiveInfo filter) throws IOException {
rpcServer.checkOperation(NameNode.OperationCategory.READ, false);
return null;
@@ -1169,7 +1169,7 @@ public void removeCachePool(String cachePoolName) throws IOException {
}
@Override
-public BatchedRemoteIterator.BatchedEntries<CachePoolEntry> listCachePools(String prevKey)
+public BatchedEntries<CachePoolEntry> listCachePools(String prevKey)
throws IOException {
rpcServer.checkOperation(NameNode.OperationCategory.READ, false);
return null;
@@ -1280,7 +1280,7 @@ public EncryptionZone getEZForPath(String src) throws IOException {
}
@Override
-public BatchedRemoteIterator.BatchedEntries<EncryptionZone> listEncryptionZones(long prevId)
+public BatchedEntries<EncryptionZone> listEncryptionZones(long prevId)
throws IOException {
rpcServer.checkOperation(NameNode.OperationCategory.READ, false);
return null;
@@ -1293,7 +1293,7 @@ public void reencryptEncryptionZone(String zone, HdfsConstants.ReencryptAction a
}
@Override
-public BatchedRemoteIterator.BatchedEntries<ZoneReencryptionStatus> listReencryptionStatus(
+public BatchedEntries<ZoneReencryptionStatus> listReencryptionStatus(
long prevId) throws IOException {
rpcServer.checkOperation(NameNode.OperationCategory.READ, false);
return null;
@@ -1529,15 +1529,17 @@ public ReplicatedBlockStats getReplicatedBlockStats() throws IOException {
@Deprecated
@Override
-public BatchedRemoteIterator.BatchedEntries<OpenFileEntry> listOpenFiles(long prevId)
+public BatchedEntries<OpenFileEntry> listOpenFiles(long prevId)
throws IOException {
-return listOpenFiles(prevId, EnumSet.of(OpenFilesIterator.OpenFilesType.ALL_OPEN_FILES),
+return listOpenFiles(prevId,
+EnumSet.of(OpenFilesIterator.OpenFilesType.ALL_OPEN_FILES),
OpenFilesIterator.FILTER_PATH_DEFAULT);
}
@Override
-public BatchedRemoteIterator.BatchedEntries<OpenFileEntry> listOpenFiles(long prevId,
-EnumSet<OpenFilesIterator.OpenFilesType> openFilesTypes, String path) throws IOException {
+public BatchedEntries<OpenFileEntry> listOpenFiles(long prevId,
+EnumSet<OpenFilesIterator.OpenFilesType> openFilesTypes, String path)
+throws IOException {
rpcServer.checkOperation(NameNode.OperationCategory.READ, false);
return null;
}
@@ -1669,7 +1671,7 @@ private HdfsFileStatus getFileInfoAll(final List<RemoteLocation> locations,
// Get the file info from everybody
Map<RemoteLocation, HdfsFileStatus> results =
rpcClient.invokeConcurrent(locations, method, HdfsFileStatus.class);
-int children=0;
+int children = 0;
// We return the first file
HdfsFileStatus dirStatus = null;
for (RemoteLocation loc : locations) {

@@ -88,7 +88,7 @@ public RouterQuotaUsage getQuotaUsage(String path) {
}
/**
-* Get children paths (can including itself) under specified federation path.
+* Get children paths (can include itself) under specified federation path.
* @param parentPath Federated path.
* @return Set of children paths.
*/

@@ -186,10 +186,8 @@ private List<MountTable> getMountTableEntries() throws IOException {
*/
private List<MountTable> getQuotaSetMountTables() throws IOException {
List<MountTable> mountTables = getMountTableEntries();
-Set<String> stalePaths = new HashSet<>();
-for (String path : this.quotaManager.getAll()) {
-stalePaths.add(path);
-}
+Set<String> allPaths = this.quotaManager.getAll();
+Set<String> stalePaths = new HashSet<>(allPaths);
List<MountTable> neededMountTables = new LinkedList<>();
for (MountTable entry : mountTables) {

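Note: the hunk above replaces a manual copy loop with the HashSet copy constructor; per the new code, quotaManager.getAll() already returns a Set<String>, so a one-step snapshot is equivalent. A trivial sketch of the idiom, with an invented helper class:

import java.util.HashSet;
import java.util.Set;

/** Illustrative helper; not part of the Router code. */
final class SnapshotSketch {

  // new HashSet<>(source) copies every element, matching the removed
  // for-loop that called add() on each path.
  static Set<String> snapshot(Set<String> trackedPaths) {
    return new HashSet<>(trackedPaths);
  }
}
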
@@ -75,9 +75,10 @@ public Builder spaceQuota(long spaceQuota) {
* @throws NSQuotaExceededException If the quota is exceeded.
*/
public void verifyNamespaceQuota() throws NSQuotaExceededException {
-if (Quota.isViolated(getQuota(), getFileAndDirectoryCount())) {
-throw new NSQuotaExceededException(getQuota(),
-getFileAndDirectoryCount());
+long quota = getQuota();
+long fileAndDirectoryCount = getFileAndDirectoryCount();
+if (Quota.isViolated(quota, fileAndDirectoryCount)) {
+throw new NSQuotaExceededException(quota, fileAndDirectoryCount);
}
}
@@ -87,25 +88,29 @@ public void verifyNamespaceQuota() throws NSQuotaExceededException {
* @throws DSQuotaExceededException If the quota is exceeded.
*/
public void verifyStoragespaceQuota() throws DSQuotaExceededException {
-if (Quota.isViolated(getSpaceQuota(), getSpaceConsumed())) {
-throw new DSQuotaExceededException(getSpaceQuota(), getSpaceConsumed());
+long spaceQuota = getSpaceQuota();
+long spaceConsumed = getSpaceConsumed();
+if (Quota.isViolated(spaceQuota, spaceConsumed)) {
+throw new DSQuotaExceededException(spaceQuota, spaceConsumed);
}
}
@Override
public String toString() {
-String nsQuota = String.valueOf(getQuota());
-String nsCount = String.valueOf(getFileAndDirectoryCount());
-if (getQuota() == HdfsConstants.QUOTA_RESET) {
-nsQuota = "-";
-nsCount = "-";
+String nsQuota = "-";
+String nsCount = "-";
+long quota = getQuota();
+if (quota != HdfsConstants.QUOTA_RESET) {
+nsQuota = String.valueOf(quota);
+nsCount = String.valueOf(getFileAndDirectoryCount());
}
-String ssQuota = StringUtils.byteDesc(getSpaceQuota());
-String ssCount = StringUtils.byteDesc(getSpaceConsumed());
-if (getSpaceQuota() == HdfsConstants.QUOTA_RESET) {
-ssQuota = "-";
-ssCount = "-";
+String ssQuota = "-";
+String ssCount = "-";
+long spaceQuota = getSpaceQuota();
+if (spaceQuota != HdfsConstants.QUOTA_RESET) {
+ssQuota = StringUtils.byteDesc(spaceQuota);
+ssCount = StringUtils.byteDesc(getSpaceConsumed());
}
StringBuilder str = new StringBuilder();
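
Note: in the last file's hunks, the verify methods now read each getter once into a local before checking Quota.isViolated, and toString() inverts the branches so the "-" placeholders are the defaults and values are only formatted when a quota is actually set. A standalone sketch of that formatting pattern follows; the helper class and its return format are invented for illustration, since the real method appends to a larger StringBuilder.

import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.util.StringUtils;

/** Illustrative helper; not part of the Router code. */
final class QuotaDescriptionSketch {

  // Default to "-" and only format real values when a quota is set, so each
  // value is computed once and the unset case needs no extra branch.
  static String describeSpace(long spaceQuota, long spaceConsumed) {
    String ssQuota = "-";
    String ssCount = "-";
    if (spaceQuota != HdfsConstants.QUOTA_RESET) {
      ssQuota = StringUtils.byteDesc(spaceQuota);
      ssCount = StringUtils.byteDesc(spaceConsumed);
    }
    return "ssQuota=" + ssQuota + ", ssCount=" + ssCount;
  }
}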