HDFS-2159. Deprecate DistributedFileSystem.getClient() and fix the deprecation warnings in DFSAdmin.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1147359 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Tsz-wo Sze 2011-07-16 05:12:40 +00:00
parent c093580824
commit c163455df4
10 changed files with 39 additions and 25 deletions

View File

@ -560,6 +560,9 @@ Trunk (unreleased changes)
HDFS-2153. Move DFSClientAdapter to test and fix some javac warnings in
OfflineEditsViewerHelper. (szetszwo)
HDFS-2159. Deprecate DistributedFileSystem.getClient() and fixed the
deprecated warnings in DFSAdmin. (szetszwo)
OPTIMIZATIONS
HDFS-1458. Improve checkpoint performance by avoiding unnecessary image

View File

@ -532,6 +532,9 @@ public String toString() {
return "DFS[" + dfs + "]";
}
/** @deprecated DFSClient should not be accessed directly. */
@InterfaceAudience.Private
@Deprecated
public DFSClient getClient() {
return dfs;
}
@ -624,9 +627,15 @@ public RemoteIterator<Path> listCorruptFileBlocks(Path path)
return new CorruptFileBlockIterator(dfs, path);
}
/** Return statistics for each datanode. */
/** @return datanode statistics. */
public DatanodeInfo[] getDataNodeStats() throws IOException {
return dfs.datanodeReport(DatanodeReportType.ALL);
return getDataNodeStats(DatanodeReportType.ALL);
}
/** @return datanode statistics for the given type. */
public DatanodeInfo[] getDataNodeStats(final DatanodeReportType type
) throws IOException {
return dfs.datanodeReport(type);
}
/**

View File

@ -34,7 +34,6 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.shell.Command;
import org.apache.hadoop.fs.shell.CommandFormat;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
@ -95,7 +94,7 @@ private static class ClearQuotaCommand extends DFSAdminCommand {
/** Constructor */
ClearQuotaCommand(String[] args, int pos, FileSystem fs) {
super(fs);
CommandFormat c = new CommandFormat(NAME, 1, Integer.MAX_VALUE);
CommandFormat c = new CommandFormat(1, Integer.MAX_VALUE);
List<String> parameters = c.parse(args, pos);
this.args = parameters.toArray(new String[parameters.size()]);
}
@ -140,7 +139,7 @@ private static class SetQuotaCommand extends DFSAdminCommand {
/** Constructor */
SetQuotaCommand(String[] args, int pos, FileSystem fs) {
super(fs);
CommandFormat c = new CommandFormat(NAME, 2, Integer.MAX_VALUE);
CommandFormat c = new CommandFormat(2, Integer.MAX_VALUE);
List<String> parameters = c.parse(args, pos);
this.quota = Long.parseLong(parameters.remove(0));
this.args = parameters.toArray(new String[parameters.size()]);
@ -180,7 +179,7 @@ private static class ClearSpaceQuotaCommand extends DFSAdminCommand {
/** Constructor */
ClearSpaceQuotaCommand(String[] args, int pos, FileSystem fs) {
super(fs);
CommandFormat c = new CommandFormat(NAME, 1, Integer.MAX_VALUE);
CommandFormat c = new CommandFormat(1, Integer.MAX_VALUE);
List<String> parameters = c.parse(args, pos);
this.args = parameters.toArray(new String[parameters.size()]);
}
@ -228,7 +227,7 @@ private static class SetSpaceQuotaCommand extends DFSAdminCommand {
/** Constructor */
SetSpaceQuotaCommand(String[] args, int pos, FileSystem fs) {
super(fs);
CommandFormat c = new CommandFormat(NAME, 2, Integer.MAX_VALUE);
CommandFormat c = new CommandFormat(2, Integer.MAX_VALUE);
List<String> parameters = c.parse(args, pos);
String str = parameters.remove(0).trim();
quota = StringUtils.TraditionalBinaryPrefix.string2long(str);
@ -327,10 +326,8 @@ public void report() throws IOException {
System.out.println("-------------------------------------------------");
DatanodeInfo[] live = dfs.getClient().datanodeReport(
DatanodeReportType.LIVE);
DatanodeInfo[] dead = dfs.getClient().datanodeReport(
DatanodeReportType.DEAD);
DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
DatanodeInfo[] dead = dfs.getDataNodeStats(DatanodeReportType.DEAD);
System.out.println("Datanodes available: " + live.length +
" (" + (live.length + dead.length) + " total, " +
dead.length + " dead)\n");
@ -691,9 +688,8 @@ public int metaSave(String[] argv, int idx) throws IOException {
*/
public int printTopology() throws IOException {
DistributedFileSystem dfs = getDFS();
DFSClient client = dfs.getClient();
DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
final DatanodeInfo[] report = dfs.getDataNodeStats();
// Build a map of rack -> nodes from the datanode report
HashMap<String, TreeSet<String> > tree = new HashMap<String, TreeSet<String>>();
for(DatanodeInfo dni : report) {

View File

@ -23,10 +23,13 @@
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
public class DFSClientAdapter {
public static DFSClient getDFSClient(DistributedFileSystem dfs) {
return dfs.dfs;
}
public static void stopLeaseRenewer(DFSClient dfsClient) throws IOException {
public static void stopLeaseRenewer(DistributedFileSystem dfs) throws IOException {
try {
dfsClient.leaserenewer.interruptAndJoin();
dfs.dfs.leaserenewer.interruptAndJoin();
} catch (InterruptedException e) {
throw new IOException(e);
}

View File

@ -71,7 +71,7 @@ public void testAbandonBlock() throws IOException {
fout.hflush();
// Now abandon the last block
DFSClient dfsclient = ((DistributedFileSystem)fs).getClient();
DFSClient dfsclient = DFSClientAdapter.getDFSClient((DistributedFileSystem)fs);
LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(src, 0, 1);
LocatedBlock b = blocks.getLastLocatedBlock();
dfsclient.getNamenode().abandonBlock(b.getBlock(), src, dfsclient.clientName);

View File

@ -80,7 +80,7 @@ public void testBlockSynchronization() throws Exception {
String filestr = "/foo";
Path filepath = new Path(filestr);
DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
assertTrue(dfs.dfs.exists(filestr));
assertTrue(dfs.exists(filepath));
DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);
//get block info for the last block

View File

@ -25,6 +25,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClientAdapter;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
@ -84,10 +85,11 @@ public void testBlockMetaDataInfo() throws Exception {
String filestr = "/foo";
Path filepath = new Path(filestr);
DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);
assertTrue(dfs.getClient().exists(filestr));
assertTrue(dfs.exists(filepath));
//get block info
LocatedBlock locatedblock = getLastLocatedBlock(dfs.getClient().getNamenode(), filestr);
LocatedBlock locatedblock = getLastLocatedBlock(
DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
assertTrue(datanodeinfo.length > 0);
@ -236,7 +238,7 @@ public void testUpdateReplicaUnderRecovery() throws IOException {
//get block info
final LocatedBlock locatedblock = getLastLocatedBlock(
dfs.getClient().getNamenode(), filestr);
DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
Assert.assertTrue(datanodeinfo.length > 0);

View File

@ -25,6 +25,7 @@
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClientAdapter;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
@ -32,7 +33,6 @@
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
@ -124,7 +124,7 @@ public void testTransferRbw() throws Exception {
final ExtendedBlock b = new ExtendedBlock(bpid, oldrbw.getBlockId(), oldrbw.getBytesAcked(),
oldrbw.getGenerationStamp());
final BlockOpResponseProto s = DFSTestUtil.transferRbw(
b, fs.getClient(), oldnodeinfo, newnodeinfo);
b, DFSClientAdapter.getDFSClient(fs), oldnodeinfo, newnodeinfo);
Assert.assertEquals(Status.SUCCESS, s.getStatus());
}

View File

@ -221,7 +221,7 @@ public Object run() throws IOException {
// OP_REASSIGN_LEASE 22
String filePath = "/hard-lease-recovery-test";
byte[] bytes = "foo-bar-baz".getBytes();
DFSClientAdapter.stopLeaseRenewer(dfs.getClient());
DFSClientAdapter.stopLeaseRenewer(dfs);
FSDataOutputStream leaseRecoveryPath = dfs.create(new Path(filePath));
leaseRecoveryPath.write(bytes);
leaseRecoveryPath.hflush();

View File

@ -27,6 +27,7 @@
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClientAdapter;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@ -72,7 +73,7 @@ void writeFile(Path file, FSDataOutputStream stm, int size)
// wait until the block is allocated by DataStreamer
BlockLocation[] locatedBlocks;
while(blocksAfter <= blocksBefore) {
locatedBlocks = hdfs.getClient().getBlockLocations(
locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
file.toString(), 0L, BLOCK_SIZE*NUM_BLOCKS);
blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
}