HDFS-13688. [SBN read] Introduce msync API call. Contributed by Chen Liang.

Erik Krogen 2018-08-01 09:58:04 -07:00 committed by Konstantin V Shvachko
parent 1e22f2bfbb
commit eae0a5d54a
9 changed files with 75 additions and 1 deletion


@@ -3181,4 +3181,18 @@ public RemoteIterator<OpenFileEntry> listOpenFiles(
    checkOpen();
    return new OpenFilesIterator(namenode, tracer, openFilesTypes, path);
  }

  /**
   * A blocking call that waits for the Observer NameNode state ID to catch up
   * to the current client state ID, which is given by the client alignment
   * context.
   * This assumes the client alignment context already has its state ID set,
   * because ObserverReadProxyProvider sets up the initial state ID when it is
   * created.
   *
   * @throws IOException
   */
  public void msync() throws IOException {
    namenode.msync();
  }
}
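
For illustration, a minimal usage sketch of the new client call, assuming direct access to a DFSClient instance; msync() is not yet exposed through the public FileSystem API in this change, and the "hdfs://mycluster" URI below is a placeholder.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;

public class MsyncUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder nameservice URI for this sketch.
    DFSClient client = new DFSClient(new URI("hdfs://mycluster"), conf);
    try {
      // Block until the (Observer) NameNode has caught up to the client's
      // last seen state ID; reads issued afterwards reflect earlier writes.
      client.msync();
    } finally {
      client.close();
    }
  }
}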


@@ -1788,6 +1788,17 @@ AddErasureCodingPolicyResponse[] addErasureCodingPolicies(
  BatchedEntries<OpenFileEntry> listOpenFiles(long prevId,
      EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException;

  /**
   * Called by the client to wait until the server has reached the state ID of
   * the client. The client and server state IDs are given by the client-side
   * and server-side alignment contexts respectively. This can be a blocking
   * call.
   *
   * @throws IOException
   */
  @Idempotent
  @ReadOnly
  void msync() throws IOException;

  /**
   * Satisfy the storage policy for a file/directory.
   * @param path Path of an existing file/directory.


@@ -158,6 +158,8 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MsyncRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MsyncResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
@@ -1947,6 +1949,16 @@ public BatchedEntries<OpenFileEntry> listOpenFiles(long prevId,
    }
  }

  @Override
  public void msync() throws IOException {
    MsyncRequestProto.Builder req = MsyncRequestProto.newBuilder();
    try {
      rpcProxy.msync(null, req.build());
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }

  @Override
  public void satisfyStoragePolicy(String src) throws IOException {
    SatisfyStoragePolicyRequestProto req =


@@ -830,6 +830,12 @@ message ListOpenFilesResponseProto {
  repeated OpenFilesTypeProto types = 3;
}

message MsyncRequestProto {
}

message MsyncResponseProto {
}

message SatisfyStoragePolicyRequestProto {
  required string src = 1;
}
@@ -1024,6 +1030,8 @@ service ClientNamenodeProtocol {
      returns(GetQuotaUsageResponseProto);
  rpc listOpenFiles(ListOpenFilesRequestProto)
      returns(ListOpenFilesResponseProto);
  rpc msync(MsyncRequestProto)
      returns(MsyncResponseProto);
  rpc satisfyStoragePolicy(SatisfyStoragePolicyRequestProto)
      returns(SatisfyStoragePolicyResponseProto);
}
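
Both new messages carry no fields. A minimal sketch of what that means in practice, assuming the generated ClientNamenodeProtocolProtos classes from the .proto above are on the classpath (the builder and getSerializedSize calls come from the standard protobuf-generated API, not from this commit):

import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MsyncRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MsyncResponseProto;

public class MsyncProtoSketch {
  public static void main(String[] args) {
    // Both msync messages are empty, so they serialize to zero bytes; the
    // state IDs being compared come from the client/server alignment contexts
    // (see the ClientProtocol javadoc), not from message fields.
    MsyncRequestProto req = MsyncRequestProto.newBuilder().build();
    MsyncResponseProto resp = MsyncResponseProto.newBuilder().build();
    System.out.println(req.getSerializedSize());   // 0
    System.out.println(resp.getSerializedSize());  // 0
  }
}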


@@ -71,7 +71,8 @@ public class TestReadOnly {
          "getDataEncryptionKey",
          "getCurrentEditLogTxid",
          "getEditsFromTxid",
          "getQuotaUsage",
          "msync"
      )
  );
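
Since msync is now annotated @ReadOnly on ClientProtocol, it joins the test's list of read-only method names above. A minimal sketch, not the actual TestReadOnly logic, of how the annotation could be observed at runtime via reflection (matching on the annotation's simple name to stay independent of its package):

import java.lang.annotation.Annotation;
import java.lang.reflect.Method;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class ReadOnlyCheckSketch {
  public static void main(String[] args) {
    // Print every ClientProtocol method carrying an annotation named
    // "ReadOnly"; after this commit, msync should appear in the output.
    for (Method m : ClientProtocol.class.getMethods()) {
      for (Annotation a : m.getAnnotations()) {
        if (a.annotationType().getSimpleName().equals("ReadOnly")) {
          System.out.println(m.getName());
        }
      }
    }
  }
}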


@@ -1532,6 +1532,11 @@ public BatchedRemoteIterator.BatchedEntries<OpenFileEntry> listOpenFiles(long pr
    return null;
  }

  @Override
  public void msync() throws IOException {
    rpcServer.checkOperation(NameNode.OperationCategory.READ, false);
  }

  @Override
  public void satisfyStoragePolicy(String path) throws IOException {
    rpcServer.checkOperation(NameNode.OperationCategory.WRITE, false);


@@ -1193,6 +1193,11 @@ public BatchedEntries<OpenFileEntry> listOpenFiles(long prevId,
    return clientProto.listOpenFiles(prevId, openFilesTypes, path);
  }

  @Override // ClientProtocol
  public void msync() throws IOException {
    clientProto.msync();
  }

  @Override // ClientProtocol
  public void satisfyStoragePolicy(String path) throws IOException {
    clientProto.satisfyStoragePolicy(path);


@@ -175,6 +175,8 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MsyncRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MsyncResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
@@ -1893,6 +1895,17 @@ public ListOpenFilesResponseProto listOpenFiles(RpcController controller,
    }
  }

  @Override
  public MsyncResponseProto msync(RpcController controller,
      MsyncRequestProto req) throws ServiceException {
    try {
      server.msync();
      return MsyncResponseProto.newBuilder().build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public SatisfyStoragePolicyResponseProto satisfyStoragePolicy(
      RpcController controller,


@@ -1380,6 +1380,11 @@ public BatchedEntries<OpenFileEntry> listOpenFiles(long prevId,
    return namesystem.listOpenFiles(prevId, openFilesTypes, path);
  }

  @Override // ClientProtocol
  public void msync() throws IOException {
    // TODO: fill in if needed; may be a no-op here.
  }

  @Override // ClientProtocol
  public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
      throws IOException {
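
The NameNode-side msync() above is deliberately left as a no-op (see the TODO). Purely as an illustration of the blocking semantics described in the ClientProtocol javadoc, a server-side wait could look roughly like the sketch below; the StateIdSource interface and every name in it are invented for this sketch and are not part of this commit.

import java.util.concurrent.TimeoutException;

// Hypothetical sketch only; nothing here is an API introduced by this commit.
public final class MsyncWaitSketch {
  /** Hypothetical source of the server's last applied state ID. */
  interface StateIdSource {
    long lastAppliedStateId();
  }

  /** Block until the server has caught up to the client's state ID. */
  static void waitForStateId(StateIdSource server, long clientStateId,
      long timeoutMs) throws InterruptedException, TimeoutException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (server.lastAppliedStateId() < clientStateId) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException(
            "msync timed out waiting for state ID " + clientStateId);
      }
      // Simple poll for the sketch; a real implementation would more likely
      // block on edit-tailing progress rather than sleep in a loop.
      Thread.sleep(10);
    }
  }
}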