HDFS-4095. Add some snapshot related metrics. Contributed by Jing Zhao.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1404881 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 2012-11-02 05:01:10 +00:00
parent 98c0f13b19
commit 77fe43ac14
6 changed files with 89 additions and 3 deletions

View File: hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt

@@ -42,3 +42,5 @@ Branch-2802 Snapshot (Unreleased)
(Jing Zhao via suresh)
HDFS-4116. Add auditlog for some snapshot operations. (Jing Zhao via suresh)
HDFS-4095. Add some snapshot related metrics. (Jing Zhao via suresh)

View File: hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -3578,6 +3578,16 @@ public float getCapacityRemainingGB() {
public int getTotalLoad() {
return datanodeStatistics.getXceiverCount();
}
@Metric({ "SnapshottableDirectories", "Number of snapshottable directories" })
public long getNumSnapshottableDirs() {
return this.snapshotManager.getNumSnapshottableDirs();
}
@Metric({ "Snapshots", "The number of snapshots" })
public long getNumSnapshots() {
return this.snapshotManager.getNumSnapshots();
}
int getNumberOfDatanodes(DatanodeReportType type) {
readLock();
@@ -5542,7 +5552,8 @@ public void allowSnapshot(String path) throws SafeModeException, IOException {
@VisibleForTesting
public void disallowSnapshot(String snapshotRoot)
throws SafeModeException, IOException {
// TODO: implement, also need to update metrics in corresponding
// SnapshotManager method
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
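
The two @Metric-annotated getters added above become gauges: the metrics2 system discovers annotated methods on a registered source and samples them on each collection, so no push logic is needed. A minimal sketch of that pattern, assuming a hypothetical standalone source class (in the patch itself the getters live directly on FSNamesystem, which is already a registered source):

import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

// Illustrative source class; FSNamesystem plays this role in the patch.
@Metrics(context="dfs")
class SnapshotStatsSource {
  private final SnapshotStats stats;

  SnapshotStatsSource(SnapshotStats stats) {
    this.stats = stats;
  }

  // Sampled by the metrics system on every collection cycle.
  @Metric({"Snapshots", "The number of snapshots"})
  public long getNumSnapshots() {
    return stats.getNumSnapshots();
  }
}

// One-time registration, e.g. during NameNode startup:
// DefaultMetricsSystem.instance().register(
//     "SnapshotStatsSource", "Snapshot statistics", new SnapshotStatsSource(snapshotManager));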

View File: hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -1080,18 +1080,21 @@ public void createSnapshot(String snapshotName, String snapshotRoot)
throw new IOException("createSnapshot: Pathname too long. Limit "
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
}
metrics.incrCreateSnapshotOps();
namesystem.createSnapshot(snapshotName, snapshotRoot);
}
@Override
// Client Protocol
public void allowSnapshot(String snapshotRoot) throws IOException {
metrics.incrAllowSnapshotOps();
namesystem.allowSnapshot(snapshotRoot);
}
@Override
// Client Protocol
public void disallowSnapshot(String snapshot) throws IOException {
metrics.incrDisAllowSnapshotOps();
namesystem.disallowSnapshot(snapshot);
}
}
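
Each counter above is bumped before the call into the namesystem, so these metrics count attempted operations, including ones that subsequently fail. A hedged sketch of how the counters could be asserted, using Hadoop's MiniDFSCluster and MetricsAsserts test utilities (the test class is illustrative, and it assumes DistributedFileSystem.allowSnapshot is wired through to this RPC method on the branch):

import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;

public class TestSnapshotOpMetrics {
  @Test
  public void allowSnapshotBumpsCounter() throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration()).build();
    try {
      DistributedFileSystem fs = cluster.getFileSystem();
      Path dir = new Path("/snapDir");
      fs.mkdirs(dir);
      fs.allowSnapshot(dir);  // reaches NameNodeRpcServer.allowSnapshot above
      // "NameNodeActivity" is the record name NameNodeMetrics registers under.
      assertCounter("AllowSnapshotOps", 1L, getMetrics("NameNodeActivity"));
    } finally {
      cluster.shutdown();
    }
  }
}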

View File: hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java

@@ -57,6 +57,12 @@ public class NameNodeMetrics {
@Metric MutableCounterLong createSymlinkOps;
@Metric MutableCounterLong getLinkTargetOps;
@Metric MutableCounterLong filesInGetListingOps;
@Metric("Number of allowSnapshot operations")
MutableCounterLong allowSnapshotOps;
@Metric("Number of disallowSnapshot operations")
MutableCounterLong disallowSnapshotOps;
@Metric("Number of createSnapshot operations")
MutableCounterLong createSnapshotOps;
@Metric("Journal transactions") MutableRate transactions;
@Metric("Journal syncs") MutableRate syncs;
@@ -159,6 +165,18 @@ public void incrGetLinkTargetOps() {
getLinkTargetOps.incr();
}
public void incrAllowSnapshotOps() {
allowSnapshotOps.incr();
}
public void incrDisAllowSnapshotOps() {
disallowSnapshotOps.incr();
}
public void incrCreateSnapshotOps() {
createSnapshotOps.incr();
}
public void addTransaction(long latency) {
transactions.add(latency);
}
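
The @Metric-annotated MutableCounterLong fields above are instantiated by the metrics2 annotation machinery when NameNodeMetrics is registered; nothing more is needed for them to appear in the NameNodeActivity record. A minimal sketch of the equivalent programmatic wiring, assuming an illustrative standalone class (the names mirror the annotations above):

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

// Illustrative only: builds the same counter by hand instead of via @Metric.
class SnapshotOpCounters {
  private final MetricsRegistry registry = new MetricsRegistry("NameNodeActivity");
  private final MutableCounterLong allowSnapshotOps =
      registry.newCounter("AllowSnapshotOps", "Number of allowSnapshot operations", 0L);

  void incrAllowSnapshotOps() {
    allowSnapshotOps.incr();  // thread-safe increment
  }
}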

View File: hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java

@@ -20,6 +20,7 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -30,10 +31,12 @@
import org.apache.hadoop.hdfs.server.namenode.INodeSymlink;
/** Manage snapshottable directories and their snapshots. */
public class SnapshotManager implements SnapshotStats {
private final FSNamesystem namesystem;
private final FSDirectory fsdir;
private AtomicLong numSnapshottableDirs = new AtomicLong();
private AtomicLong numSnapshots = new AtomicLong();
/** All snapshottable directories in the namesystem. */
private final List<INodeDirectorySnapshottable> snapshottables
= new ArrayList<INodeDirectorySnapshottable>();
@@ -67,6 +70,7 @@ public void setSnapshottable(final String path, final int snapshotQuota
} finally {
namesystem.writeUnlock();
}
numSnapshottableDirs.getAndIncrement();
}
/**
@@ -78,6 +82,7 @@ public void setSnapshottable(final String path, final int snapshotQuota
public void createSnapshot(final String snapshotName, final String path
) throws IOException {
new SnapshotCreation(path).run(snapshotName);
numSnapshots.getAndIncrement();
}
/**
@@ -174,4 +179,15 @@ private INodeFileSnapshot processINodeFile(final INodeDirectory parent,
return snapshot;
}
}
@Override
public long getNumSnapshottableDirs() {
return numSnapshottableDirs.get();
}
@Override
public long getNumSnapshots() {
return numSnapshots.get();
}
}
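
Two things are worth noting above: the AtomicLong counters are updated after the namesystem write lock is released (see setSnapshottable), so they need their own thread-safe increment; and the TODO in FSNamesystem.disallowSnapshot points at the missing decrement counterpart here. A hedged sketch of that counterpart's shape (the method name resetSnapshottable and its body are assumptions, not part of this patch):

import java.util.concurrent.atomic.AtomicLong;

// Illustrative shape only; the actual conversion back to a plain
// INodeDirectory is the unimplemented work the TODO refers to.
class SnapshottableDirCounter {
  private final AtomicLong numSnapshottableDirs = new AtomicLong();

  void setSnapshottable(String path) {
    // ... convert the directory to an INodeDirectorySnapshottable ...
    numSnapshottableDirs.getAndIncrement();
  }

  void resetSnapshottable(String path) {
    // ... convert the directory back and drop it from the snapshottables list ...
    numSnapshottableDirs.getAndDecrement();
  }

  long getNumSnapshottableDirs() {
    return numSnapshottableDirs.get();
  }
}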

View File: hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotStats.java (new file)

@@ -0,0 +1,36 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
/**
* An interface for retrieving statistics related to snapshots.
*/
public interface SnapshotStats {
/**
* @return The number of snapshottable directories in the system
*/
public long getNumSnapshottableDirs();
/**
* @return The number of snapshots that have been taken
*/
public long getNumSnapshots();
}
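
Since FSNamesystem exposes both values as gauges, they can be read like any other NameNode metric, for example over JMX. A hedged sketch of an in-process reader follows (the MBean object name uses the usual Hadoop:service=NameNode naming convention but is an assumption here; the attribute names match the @Metric annotations in FSNamesystem):

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class SnapshotMetricsProbe {
  public static void main(String[] args) throws Exception {
    // In-process read; a remote reader would use JMXConnectorFactory instead.
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName fsn = new ObjectName("Hadoop:service=NameNode,name=FSNamesystem");
    System.out.println("SnapshottableDirectories = "
        + mbs.getAttribute(fsn, "SnapshottableDirectories"));
    System.out.println("Snapshots = " + mbs.getAttribute(fsn, "Snapshots"));
  }
}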