HDFS-2922. HA: close out operation categories. Contributed by Eli Collins

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1292620 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 2012-02-23 02:08:54 +00:00
parent 90a14f89e1
commit 8db31c5972
5 changed files with 27 additions and 20 deletions


@@ -226,3 +226,5 @@ HDFS-2929. Stress test and fixes for block synchronization (todd)
 HDFS-2972. Small optimization building incremental block report (todd)
 HDFS-2973. Re-enable NO_ACK optimization for block deletion. (todd)
+HDFS-2922. HA: close out operation categories (eli)


@@ -407,6 +407,9 @@ private class BNHAContext extends NameNodeHAContext {
     @Override // NameNode
     public void checkOperation(OperationCategory op)
         throws StandbyException {
+      if (op == OperationCategory.UNCHECKED) {
+        return;
+      }
       if (OperationCategory.JOURNAL != op &&
           !(OperationCategory.READ == op && allowStaleStandbyReads)) {
         String msg = "Operation category " + op


@@ -114,6 +114,8 @@ public class NameNode {
    * Categories of operations supported by the namenode.
    */
   public static enum OperationCategory {
+    /** Operations that are state agnostic */
+    UNCHECKED,
     /** Read operation that does not change the namespace state */
     READ,
     /** Write operation that changes the namespace state */
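
The diff view cuts the enum off after the WRITE javadoc. For reference, the categories this commit refers to elsewhere are UNCHECKED, READ, WRITE, CHECKPOINT and JOURNAL; a standalone restatement follows (not the actual NameNode.java, and the CHECKPOINT and JOURNAL descriptions are paraphrased from how the commit uses them).

public enum OperationCategory {
  /** Operations that are state agnostic (never rejected by an HA state check). */
  UNCHECKED,
  /** Read operation that does not change the namespace state. */
  READ,
  /** Write operation that changes the namespace state. */
  WRITE,
  /** Operations related to checkpointing, e.g. fetching the synced transaction id. */
  CHECKPOINT,
  /** Operations on the edit journal, e.g. the stream a BackupNode receives. */
  JOURNAL
}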


@@ -353,9 +353,7 @@ public ExportedBlockKeys getBlockKeys() throws IOException {
   public void errorReport(NamenodeRegistration registration,
                           int errorCode,
                           String msg) throws IOException {
-    // nn.checkOperation(OperationCategory.WRITE);
-    // TODO: I dont think this should be checked - it's just for logging
-    // and dropping backups
+    namesystem.checkOperation(OperationCategory.UNCHECKED);
     verifyRequest(registration);
     LOG.info("Error report from " + registration + ": " + msg);
     if(errorCode == FATAL)
@@ -707,8 +705,8 @@ public long[] getStats() throws IOException {
   @Override // ClientProtocol
   public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
-  throws IOException {
-    // TODO(HA): decide on OperationCategory for this
+      throws IOException {
+    namesystem.checkOperation(OperationCategory.UNCHECKED);
     DatanodeInfo results[] = namesystem.datanodeReport(type);
     if (results == null ) {
       throw new IOException("Cannot find datanode report");
@@ -718,32 +716,32 @@ public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
   @Override // ClientProtocol
   public boolean setSafeMode(SafeModeAction action) throws IOException {
-    // TODO:HA decide on OperationCategory for this
+    namesystem.checkOperation(OperationCategory.UNCHECKED);
     return namesystem.setSafeMode(action);
   }

   @Override // ClientProtocol
-  public boolean restoreFailedStorage(String arg)
-      throws AccessControlException {
-    // TODO:HA decide on OperationCategory for this
+  public boolean restoreFailedStorage(String arg) throws IOException {
+    namesystem.checkOperation(OperationCategory.UNCHECKED);
     return namesystem.restoreFailedStorage(arg);
   }

   @Override // ClientProtocol
   public void saveNamespace() throws IOException {
-    // TODO:HA decide on OperationCategory for this
+    namesystem.checkOperation(OperationCategory.UNCHECKED);
     namesystem.saveNamespace();
   }

   @Override // ClientProtocol
   public void refreshNodes() throws IOException {
-    // TODO:HA decide on OperationCategory for this
+    namesystem.checkOperation(OperationCategory.UNCHECKED);
     namesystem.getBlockManager().getDatanodeManager().refreshNodes(
         new HdfsConfiguration());
   }

   @Override // NamenodeProtocol
-  public long getTransactionID() {
-    // TODO:HA decide on OperationCategory for this
+  public long getTransactionID() throws IOException {
+    namesystem.checkOperation(OperationCategory.CHECKPOINT);
     return namesystem.getEditLog().getSyncTxId();
   }
@@ -755,28 +753,29 @@ public CheckpointSignature rollEditLog() throws IOException {
   @Override // NamenodeProtocol
   public RemoteEditLogManifest getEditLogManifest(long sinceTxId)
       throws IOException {
-    // TODO:HA decide on OperationCategory for this
+    namesystem.checkOperation(OperationCategory.READ);
     return namesystem.getEditLog().getEditLogManifest(sinceTxId);
   }

   @Override // ClientProtocol
   public void finalizeUpgrade() throws IOException {
-    // TODO:HA decide on OperationCategory for this
+    namesystem.checkOperation(OperationCategory.WRITE);
     namesystem.finalizeUpgrade();
   }

   @Override // ClientProtocol
   public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
       throws IOException {
-    // TODO:HA decide on OperationCategory for this
+    namesystem.checkOperation(OperationCategory.READ);
     return namesystem.distributedUpgradeProgress(action);
   }

   @Override // ClientProtocol
   public void metaSave(String filename) throws IOException {
-    // TODO:HA decide on OperationCategory for this
+    namesystem.checkOperation(OperationCategory.UNCHECKED);
     namesystem.metaSave(filename);
   }

   @Override // ClientProtocol
   public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
       throws IOException {
@@ -795,12 +794,12 @@ public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
   /**
    * Tell all datanodes to use a new, non-persistent bandwidth value for
    * dfs.datanode.balance.bandwidthPerSec.
-   * @param bandwidth Blanacer bandwidth in bytes per second for all datanodes.
+   * @param bandwidth Balancer bandwidth in bytes per second for all datanodes.
    * @throws IOException
    */
   @Override // ClientProtocol
   public void setBalancerBandwidth(long bandwidth) throws IOException {
-    // TODO:HA decide on OperationCategory for this
+    namesystem.checkOperation(OperationCategory.UNCHECKED);
     namesystem.getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
   }
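
Aside: the hunks above converge on one pattern, where each RPC handler declares its OperationCategory by calling checkOperation() before doing any work, and UNCHECKED is used for admin and diagnostic calls that must succeed in any HA state. A standalone sketch of that pattern (hypothetical CategoryGate and RpcCategorySketch names, not the Hadoop classes):

import java.io.IOException;

public class RpcCategorySketch {
  enum OperationCategory { UNCHECKED, READ, WRITE, CHECKPOINT, JOURNAL }

  /** Minimal stand-in for the namesystem's HA-aware category check. */
  interface CategoryGate {
    void checkOperation(OperationCategory op) throws IOException;
  }

  private final CategoryGate namesystem;

  RpcCategorySketch(CategoryGate namesystem) {
    this.namesystem = namesystem;
  }

  /** Diagnostic call: tagged UNCHECKED so it works in any HA state. */
  public void metaSave(String filename) throws IOException {
    namesystem.checkOperation(OperationCategory.UNCHECKED);
    System.out.println("would dump metadata to " + filename);
  }

  /** Mutating call: tagged WRITE so only an active namenode serves it. */
  public void finalizeUpgrade() throws IOException {
    namesystem.checkOperation(OperationCategory.WRITE);
    System.out.println("would finalize the upgrade");
  }

  public static void main(String[] args) throws IOException {
    // A gate that behaves like a standby rejecting everything but UNCHECKED.
    CategoryGate standbyLike = op -> {
      if (op != OperationCategory.UNCHECKED) {
        throw new IOException("Operation category " + op + " is not supported");
      }
    };
    RpcCategorySketch rpc = new RpcCategorySketch(standbyLike);
    rpc.metaSave("meta.txt");  // passes even on a standby
    try {
      rpc.finalizeUpgrade();   // rejected on a standby
    } catch (IOException expected) {
      System.out.println(expected.getMessage());
    }
  }
}

Which category each real handler gets is visible in the hunks above; the sketch only illustrates the calling convention.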


@@ -78,7 +78,8 @@ public void exitState(HAContext context) throws ServiceFailedException {
   @Override
   public void checkOperation(HAContext context, OperationCategory op)
       throws StandbyException {
-    if (op == OperationCategory.READ && context.allowStaleReads()) {
+    if (op == OperationCategory.UNCHECKED ||
+        (op == OperationCategory.READ && context.allowStaleReads())) {
       return;
     }
     String msg = "Operation category " + op + " is not supported in state "