initialValue() {
instances.put(OP_DELETE, new DeleteOp());
instances.put(OP_MKDIR, new MkdirOp());
instances.put(OP_SET_GENSTAMP, new SetGenstampOp());
- instances.put(OP_DATANODE_ADD, new DatanodeAddOp());
- instances.put(OP_DATANODE_REMOVE, new DatanodeRemoveOp());
instances.put(OP_SET_PERMISSIONS, new SetPermissionsOp());
instances.put(OP_SET_OWNER, new SetOwnerOp());
instances.put(OP_SET_NS_QUOTA, new SetNSQuotaOp());
@@ -147,7 +142,6 @@ static abstract class AddCloseOp extends FSEditLogOp implements BlockListUpdatin
PermissionStatus permissions;
String clientName;
String clientMachine;
- //final DatanodeDescriptor[] dataNodeDescriptors; UNUSED
private AddCloseOp(FSEditLogOpCodes opCode) {
super(opCode);
@@ -226,13 +220,10 @@ void writeFields(DataOutputStream out) throws IOException {
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
- // versions > 0 support per file replication
- // get name and replication
if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.length = in.readInt();
}
- if (-7 == logVersion && length != 3||
- -17 < logVersion && logVersion < -7 && length != 4 ||
+ if ((-17 < logVersion && length != 4) ||
(logVersion <= -17 && length != 5 && !LayoutVersion.supports(
Feature.EDITLOG_OP_OPTIMIZATION, logVersion))) {
throw new IOException("Incorrect data format." +
@@ -259,49 +250,26 @@ void readFields(DataInputStream in, int logVersion)
} else {
this.atime = 0;
}
- if (logVersion < -7) {
- if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
- this.blockSize = FSImageSerialization.readLong(in);
- } else {
- this.blockSize = readLong(in);
- }
+
+ if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+ this.blockSize = FSImageSerialization.readLong(in);
} else {
- this.blockSize = 0;
+ this.blockSize = readLong(in);
}
- // get blocks
this.blocks = readBlocks(in, logVersion);
-
- if (logVersion <= -11) {
- this.permissions = PermissionStatus.read(in);
- } else {
- this.permissions = null;
- }
+ this.permissions = PermissionStatus.read(in);
// clientname, clientMachine and block locations of last block.
- if (this.opCode == OP_ADD && logVersion <= -12) {
+ if (this.opCode == OP_ADD) {
this.clientName = FSImageSerialization.readString(in);
this.clientMachine = FSImageSerialization.readString(in);
- if (-13 <= logVersion) {
- readDatanodeDescriptorArray(in);
- }
} else {
this.clientName = "";
this.clientMachine = "";
}
}
- /** This method is defined for compatibility reason. */
- private static DatanodeDescriptor[] readDatanodeDescriptorArray(DataInput in)
- throws IOException {
- DatanodeDescriptor[] locations = new DatanodeDescriptor[in.readInt()];
- for (int i = 0; i < locations.length; i++) {
- locations[i] = new DatanodeDescriptor();
- locations[i].readFieldsFromFSEditLog(in);
- }
- return locations;
- }
-
private static Block[] readBlocks(
DataInputStream in,
int logVersion) throws IOException {
@@ -309,14 +277,7 @@ private static Block[] readBlocks(
Block[] blocks = new Block[numBlocks];
for (int i = 0; i < numBlocks; i++) {
Block blk = new Block();
- if (logVersion <= -14) {
- blk.readFields(in);
- } else {
- BlockTwo oldblk = new BlockTwo();
- oldblk.readFields(in);
- blk.set(oldblk.blkid, oldblk.len,
- GenerationStamp.GRANDFATHER_GENERATION_STAMP);
- }
+ blk.readFields(in);
blocks[i] = blk;
}
return blocks;
@@ -788,17 +749,14 @@ void writeFields(DataOutputStream out) throws IOException {
}
@Override
- void readFields(DataInputStream in, int logVersion)
- throws IOException {
-
+ void readFields(DataInputStream in, int logVersion) throws IOException {
if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.length = in.readInt();
}
if (-17 < logVersion && length != 2 ||
logVersion <= -17 && length != 3
&& !LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
- throw new IOException("Incorrect data format. "
- + "Mkdir operation.");
+ throw new IOException("Incorrect data format. Mkdir operation.");
}
this.path = FSImageSerialization.readString(in);
if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
@@ -811,7 +769,6 @@ void readFields(DataInputStream in, int logVersion)
// However, currently this is not being updated/used because of
// performance reasons.
if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, logVersion)) {
- /* unused this.atime = */
if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
FSImageSerialization.readLong(in);
} else {
@@ -819,11 +776,7 @@ void readFields(DataInputStream in, int logVersion)
}
}
- if (logVersion <= -11) {
- this.permissions = PermissionStatus.read(in);
- } else {
- this.permissions = null;
- }
+ this.permissions = PermissionStatus.read(in);
}
@Override
@@ -888,77 +841,6 @@ public String toString() {
}
}
- @SuppressWarnings("deprecation")
- static class DatanodeAddOp extends FSEditLogOp {
- private DatanodeAddOp() {
- super(OP_DATANODE_ADD);
- }
-
- static DatanodeAddOp getInstance() {
- return (DatanodeAddOp)opInstances.get()
- .get(OP_DATANODE_ADD);
- }
-
- @Override
- void writeFields(DataOutputStream out) throws IOException {
- throw new IOException("Deprecated, should not write");
- }
-
- @Override
- void readFields(DataInputStream in, int logVersion)
- throws IOException {
- //Datanodes are not persistent any more.
- FSImageSerialization.DatanodeImage.skipOne(in);
- }
-
- @Override
- public String toString() {
- StringBuilder builder = new StringBuilder();
- builder.append("DatanodeAddOp [opCode=");
- builder.append(opCode);
- builder.append(", txid=");
- builder.append(txid);
- builder.append("]");
- return builder.toString();
- }
- }
-
- @SuppressWarnings("deprecation")
- static class DatanodeRemoveOp extends FSEditLogOp {
- private DatanodeRemoveOp() {
- super(OP_DATANODE_REMOVE);
- }
-
- static DatanodeRemoveOp getInstance() {
- return (DatanodeRemoveOp)opInstances.get()
- .get(OP_DATANODE_REMOVE);
- }
-
- @Override
- void writeFields(DataOutputStream out) throws IOException {
- throw new IOException("Deprecated, should not write");
- }
-
- @Override
- void readFields(DataInputStream in, int logVersion)
- throws IOException {
- DatanodeID nodeID = new DatanodeID();
- nodeID.readFields(in);
- //Datanodes are not persistent any more.
- }
-
- @Override
- public String toString() {
- StringBuilder builder = new StringBuilder();
- builder.append("DatanodeRemoveOp [opCode=");
- builder.append(opCode);
- builder.append(", txid=");
- builder.append(txid);
- builder.append("]");
- return builder.toString();
- }
- }
-
static class SetPermissionsOp extends FSEditLogOp {
String src;
FsPermission permissions;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
index 1f809c12b2..c08a5a92a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
@@ -36,8 +36,8 @@ public enum FSEditLogOpCodes {
OP_DELETE ((byte) 2),
OP_MKDIR ((byte) 3),
OP_SET_REPLICATION ((byte) 4),
- @Deprecated OP_DATANODE_ADD ((byte) 5),
- @Deprecated OP_DATANODE_REMOVE((byte) 6),
+ @Deprecated OP_DATANODE_ADD ((byte) 5), // obsolete
+ @Deprecated OP_DATANODE_REMOVE((byte) 6), // obsolete
OP_SET_PERMISSIONS ((byte) 7),
OP_SET_OWNER ((byte) 8),
OP_CLOSE ((byte) 9),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index e029b24022..f666f35b74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -131,34 +131,22 @@ void load(File curFile)
DataInputStream in = new DataInputStream(fin);
try {
- /*
- * Note: Remove any checks for version earlier than
- * Storage.LAST_UPGRADABLE_LAYOUT_VERSION since we should never get
- * to here with older images.
- */
-
- /*
- * TODO we need to change format of the image file
- * it should not contain version and namespace fields
- */
// read image version: first appeared in version -1
int imgVersion = in.readInt();
- if(getLayoutVersion() != imgVersion)
+ if (getLayoutVersion() != imgVersion) {
throw new InconsistentFSStateException(curFile,
"imgVersion " + imgVersion +
" expected to be " + getLayoutVersion());
+ }
// read namespaceID: first appeared in version -2
in.readInt();
- // read number of files
- long numFiles = readNumFiles(in);
+ long numFiles = in.readLong();
// read in the last generation stamp.
- if (imgVersion <= -12) {
- long genstamp = in.readLong();
- namesystem.setGenerationStamp(genstamp);
- }
+ long genstamp = in.readLong();
+ namesystem.setGenerationStamp(genstamp);
// read the transaction ID of the last edit represented by
// this image
@@ -167,7 +155,6 @@ void load(File curFile)
} else {
imgTxId = 0;
}
-
// read compression related info
FSImageCompression compression;
@@ -189,13 +176,9 @@ void load(File curFile)
loadFullNameINodes(numFiles, in);
}
- // load datanode info
- this.loadDatanodes(in);
+ loadFilesUnderConstruction(in);
- // load Files Under Construction
- this.loadFilesUnderConstruction(in);
-
- this.loadSecretManagerState(in);
+ loadSecretManagerState(in);
// make sure to read to the end of file
int eof = in.read();
@@ -335,89 +318,44 @@ private INode loadINode(DataInputStream in)
if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, imgVersion)) {
atime = in.readLong();
}
- if (imgVersion <= -8) {
- blockSize = in.readLong();
- }
+ blockSize = in.readLong();
int numBlocks = in.readInt();
BlockInfo blocks[] = null;
- // for older versions, a blocklist of size 0
- // indicates a directory.
- if ((-9 <= imgVersion && numBlocks > 0) ||
- (imgVersion < -9 && numBlocks >= 0)) {
+ if (numBlocks >= 0) {
blocks = new BlockInfo[numBlocks];
for (int j = 0; j < numBlocks; j++) {
blocks[j] = new BlockInfo(replication);
- if (-14 < imgVersion) {
- blocks[j].set(in.readLong(), in.readLong(),
- GenerationStamp.GRANDFATHER_GENERATION_STAMP);
- } else {
- blocks[j].readFields(in);
- }
- }
- }
- // Older versions of HDFS does not store the block size in inode.
- // If the file has more than one block, use the size of the
- // first block as the blocksize. Otherwise use the default block size.
- //
- if (-8 <= imgVersion && blockSize == 0) {
- if (numBlocks > 1) {
- blockSize = blocks[0].getNumBytes();
- } else {
- long first = ((numBlocks == 1) ? blocks[0].getNumBytes(): 0);
- blockSize = Math.max(namesystem.getDefaultBlockSize(), first);
+ blocks[j].readFields(in);
}
}
// get quota only when the node is a directory
long nsQuota = -1L;
- if (LayoutVersion.supports(Feature.NAMESPACE_QUOTA, imgVersion)
- && blocks == null && numBlocks == -1) {
- nsQuota = in.readLong();
- }
- long dsQuota = -1L;
- if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imgVersion)
- && blocks == null && numBlocks == -1) {
- dsQuota = in.readLong();
- }
-
- // Read the symlink only when the node is a symlink
- String symlink = "";
- if (numBlocks == -2) {
- symlink = Text.readString(in);
- }
-
- PermissionStatus permissions = namesystem.getUpgradePermission();
- if (imgVersion <= -11) {
- permissions = PermissionStatus.read(in);
- }
-
- return INode.newINode(permissions, blocks, symlink, replication,
- modificationTime, atime, nsQuota, dsQuota, blockSize);
+ if (blocks == null && numBlocks == -1) {
+ nsQuota = in.readLong();
+ }
+ long dsQuota = -1L;
+ if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imgVersion)
+ && blocks == null && numBlocks == -1) {
+ dsQuota = in.readLong();
}
- private void loadDatanodes(DataInputStream in)
- throws IOException {
- int imgVersion = getLayoutVersion();
-
- if (imgVersion > -3) // pre datanode image version
- return;
- if (imgVersion <= -12) {
- return; // new versions do not store the datanodes any more.
- }
- int size = in.readInt();
- for(int i = 0; i < size; i++) {
- // We don't need to add these descriptors any more.
- FSImageSerialization.DatanodeImage.skipOne(in);
- }
+ // Read the symlink only when the node is a symlink
+ String symlink = "";
+ if (numBlocks == -2) {
+ symlink = Text.readString(in);
}
+
+ PermissionStatus permissions = PermissionStatus.read(in);
+
+ return INode.newINode(permissions, blocks, symlink, replication,
+ modificationTime, atime, nsQuota, dsQuota, blockSize);
+ }
private void loadFilesUnderConstruction(DataInputStream in)
throws IOException {
FSDirectory fsDir = namesystem.dir;
- int imgVersion = getLayoutVersion();
- if (imgVersion > -13) // pre lease image version
- return;
int size = in.readInt();
LOG.info("Number of files under construction = " + size);
@@ -457,17 +395,6 @@ private int getLayoutVersion() {
return namesystem.getFSImage().getStorage().getLayoutVersion();
}
- private long readNumFiles(DataInputStream in)
- throws IOException {
- int imgVersion = getLayoutVersion();
-
- if (LayoutVersion.supports(Feature.NAMESPACE_QUOTA, imgVersion)) {
- return in.readLong();
- } else {
- return in.readInt();
- }
- }
-
private boolean isRoot(byte[][] path) {
return path.length == 1 &&
path[0] == null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
index f5084339e8..d6453fa8b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
@@ -17,9 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import java.io.DataInput;
import java.io.DataInputStream;
-import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
@@ -31,7 +29,6 @@
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DeprecatedUTF8;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -39,7 +36,6 @@
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.ShortWritable;
import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
/**
@@ -107,13 +103,10 @@ static INodeFileUnderConstruction readINodeUnderConstruction(
String clientName = readString(in);
String clientMachine = readString(in);
- // These locations are not used at all
+ // We previously stored locations for the last block, now we
+ // just record that there are none
int numLocs = in.readInt();
- DatanodeDescriptor[] locations = new DatanodeDescriptor[numLocs];
- for (i = 0; i < numLocs; i++) {
- locations[i] = new DatanodeDescriptor();
- locations[i].readFields(in);
- }
+ assert numLocs == 0 : "Unexpected block locations";
return new INodeFileUnderConstruction(name,
blockReplication,
@@ -320,53 +313,4 @@ public static Block[] readCompactBlockArray(
}
return ret;
}
-
- /**
- * DatanodeImage is used to store persistent information
- * about datanodes into the fsImage.
- */
- static class DatanodeImage implements Writable {
- DatanodeDescriptor node = new DatanodeDescriptor();
-
- static void skipOne(DataInput in) throws IOException {
- DatanodeImage nodeImage = new DatanodeImage();
- nodeImage.readFields(in);
- }
-
- /////////////////////////////////////////////////
- // Writable
- /////////////////////////////////////////////////
- /**
- * Public method that serializes the information about a
- * Datanode to be stored in the fsImage.
- */
- public void write(DataOutput out) throws IOException {
- new DatanodeID(node).write(out);
- out.writeLong(node.getCapacity());
- out.writeLong(node.getRemaining());
- out.writeLong(node.getLastUpdate());
- out.writeInt(node.getXceiverCount());
- }
-
- /**
- * Public method that reads a serialized Datanode
- * from the fsImage.
- */
- public void readFields(DataInput in) throws IOException {
- DatanodeID id = new DatanodeID();
- id.readFields(in);
- long capacity = in.readLong();
- long remaining = in.readLong();
- long lastUpdate = in.readLong();
- int xceiverCount = in.readInt();
-
- // update the DatanodeDescriptor with the data we read in
- node.updateRegInfo(id);
- node.setStorageID(id.getStorageID());
- node.setCapacity(capacity);
- node.setRemaining(remaining);
- node.setLastUpdate(lastUpdate);
- node.setXceiverCount(xceiverCount);
- }
- }
}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 9d647f2353..06e613aee0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -52,8 +52,6 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY;
@@ -118,7 +116,6 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.ha.ServiceFailedException;
-import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -267,7 +264,6 @@ private static final void logAuditEvent(UserGroupInformation ugi,
private boolean persistBlocks;
private UserGroupInformation fsOwner;
private String supergroup;
- private PermissionStatus defaultPermission;
private boolean standbyShouldCheckpoint;
// Scan interval is not configurable.
@@ -846,11 +842,6 @@ private void setConfigurationParameters(Configuration conf)
"must not be specified if HA is not enabled.");
}
- short filePermission = (short)conf.getInt(DFS_NAMENODE_UPGRADE_PERMISSION_KEY,
- DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT);
- this.defaultPermission = PermissionStatus.createImmutable(
- fsOwner.getShortUserName(), supergroup, new FsPermission(filePermission));
-
this.serverDefaults = new FsServerDefaults(
conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT),
conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT),
@@ -878,14 +869,6 @@ private void setConfigurationParameters(Configuration conf)
DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_DEFAULT);
}
- /**
- * Return the default path permission when upgrading from releases with no
- * permissions (<=0.15) to releases with permissions (>=0.16)
- */
- protected PermissionStatus getUpgradePermission() {
- return defaultPermission;
- }
-
NamespaceInfo getNamespaceInfo() {
readLock();
try {
@@ -5072,6 +5055,8 @@ public String getLiveNodes() {
innerinfo.put("lastContact", getLastContact(node));
innerinfo.put("usedSpace", getDfsUsed(node));
innerinfo.put("adminState", node.getAdminState().toString());
+ innerinfo.put("nonDfsUsedSpace", node.getNonDfsUsed());
+ innerinfo.put("capacity", node.getCapacity());
info.put(node.getHostName(), innerinfo);
}
return JSON.toString(info);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
index 321d0398c5..9c9b0fdda0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
@@ -59,7 +59,7 @@ private URL createRedirectURL(UserGroupInformation ugi, DatanodeID host,
HttpServletRequest request, NameNode nn)
throws IOException {
final String hostname = host instanceof DatanodeInfo
- ? ((DatanodeInfo)host).getHostName() : host.getHost();
+ ? ((DatanodeInfo)host).getHostName() : host.getIpAddr();
final String scheme = request.getScheme();
final int port = "https".equals(scheme)
? (Integer)getServletContext().getAttribute("datanode.https.port")
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
index 1604ec128b..c8ccca16d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
@@ -59,7 +59,7 @@ private URL createRedirectURL(String path, String encodedPath, HdfsFileStatus st
if (host instanceof DatanodeInfo) {
hostname = ((DatanodeInfo)host).getHostName();
} else {
- hostname = host.getHost();
+ hostname = host.getIpAddr();
}
final int port = "https".equals(scheme)
? (Integer)getServletContext().getAttribute("datanode.https.port")
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 61ad180e86..ca4ab24c21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -854,7 +854,7 @@ public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
BlockListAsLongs blist = new BlockListAsLongs(reports[0].getBlocks());
if(stateChangeLog.isDebugEnabled()) {
stateChangeLog.debug("*BLOCK* NameNode.blockReport: "
- + "from " + nodeReg.getName() + " " + blist.getNumberOfBlocks()
+ + "from " + nodeReg + " " + blist.getNumberOfBlocks()
+ " blocks");
}
@@ -870,7 +870,7 @@ public void blockReceivedAndDeleted(DatanodeRegistration nodeReg, String poolId,
verifyRequest(nodeReg);
if(stateChangeLog.isDebugEnabled()) {
stateChangeLog.debug("*BLOCK* NameNode.blockReceivedAndDeleted: "
- +"from "+nodeReg.getName()+" "+receivedAndDeletedBlocks.length
+ +"from "+nodeReg+" "+receivedAndDeletedBlocks.length
+" blocks.");
}
namesystem.getBlockManager().processIncrementalBlockReport(
@@ -880,7 +880,8 @@ public void blockReceivedAndDeleted(DatanodeRegistration nodeReg, String poolId,
@Override // DatanodeProtocol
public void errorReport(DatanodeRegistration nodeReg,
int errorCode, String msg) throws IOException {
- String dnName = (nodeReg == null ? "unknown DataNode" : nodeReg.getName());
+ String dnName =
+ (nodeReg == null) ? "Unknown DataNode" : nodeReg.toString();
if (errorCode == DatanodeProtocol.NOTIFY) {
LOG.info("Error report from " + dnName + ": " + msg);
@@ -909,13 +910,10 @@ public UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOExcept
}
/**
- * Verify request.
+ * Verifies the given registration.
*
- * Verifies correctness of the datanode version, registration ID, and
- * if the datanode does not need to be shutdown.
- *
- * @param nodeReg data node registration
- * @throws IOException
+ * @param nodeReg node registration
+ * @throws UnregisteredNodeException if the registration is invalid
*/
void verifyRequest(NodeRegistration nodeReg) throws IOException {
verifyVersion(nodeReg.getVersion());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 8763c93e3b..7cb868b179 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -496,7 +496,7 @@ private void copyBlock(DFSClient dfs, LocatedBlock lblock,
try {
chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
- targetAddr = NetUtils.createSocketAddr(chosenNode.getName());
+ targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr());
} catch (IOException ie) {
if (failures >= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) {
throw new IOException("Could not obtain block " + lblock);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
index 44c07510ba..2dfa59751f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
@@ -260,14 +260,14 @@ void generateHealthReport(JspWriter out, NameNode nn,
// Find out common suffix. Should this be before or after the sort?
String port_suffix = null;
if (live.size() > 0) {
- String name = live.get(0).getName();
+ String name = live.get(0).getXferAddr();
int idx = name.indexOf(':');
if (idx > 0) {
port_suffix = name.substring(idx);
}
for (int i = 1; port_suffix != null && i < live.size(); i++) {
- if (live.get(i).getName().endsWith(port_suffix) == false) {
+ if (live.get(i).getXferAddr().endsWith(port_suffix) == false) {
port_suffix = null;
break;
}
@@ -404,7 +404,7 @@ static void redirectToRandomDataNode(ServletContext context,
final String nodeToRedirect;
int redirectPort;
if (datanode != null) {
- nodeToRedirect = datanode.getHost();
+ nodeToRedirect = datanode.getIpAddr();
redirectPort = datanode.getInfoPort();
} else {
nodeToRedirect = nn.getHttpAddress().getHostName();
@@ -466,14 +466,14 @@ private void generateNodeDataHeader(JspWriter out, DatanodeDescriptor d,
+ URLEncoder.encode("/", "UTF-8")
+ JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnaddr);
- String name = d.getHostName() + ":" + d.getPort();
+ String name = d.getXferAddrWithHostname();
if (!name.matches("\\d+\\.\\d+.\\d+\\.\\d+.*"))
name = name.replaceAll("\\.[^.:]*", "");
int idx = (suffix != null && name.endsWith(suffix)) ? name
.indexOf(suffix) : -1;
- out.print(rowTxt() + ""
+ out.print(rowTxt() + " | "
+ ((idx > 0) ? name.substring(0, idx) : name) + ""
+ ((alive) ? "" : "\n"));
}
@@ -599,14 +599,14 @@ void generateNodesList(ServletContext context, JspWriter out,
// Find out common suffix. Should this be before or after the sort?
String port_suffix = null;
if (live.size() > 0) {
- String name = live.get(0).getName();
+ String name = live.get(0).getXferAddr();
int idx = name.indexOf(':');
if (idx > 0) {
port_suffix = name.substring(idx);
}
for (int i = 1; port_suffix != null && i < live.size(); i++) {
- if (live.get(i).getName().endsWith(port_suffix) == false) {
+ if (live.get(i).getXferAddr().endsWith(port_suffix) == false) {
port_suffix = null;
break;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
index c36fb69ee4..9439c631d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
@@ -80,9 +80,8 @@ public interface DatanodeProtocol {
*
* @see org.apache.hadoop.hdfs.server.namenode.FSNamesystem#registerDatanode(DatanodeRegistration)
* @param registration datanode registration information
- * @return updated {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration}, which contains
- * new storageID if the datanode did not have one and
- * registration ID for further communication.
+ * @return the given {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration} with
+ * updated registration information
*/
public DatanodeRegistration registerDatanode(DatanodeRegistration registration
) throws IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
index d21b92ed1b..228fb62262 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
@@ -49,8 +49,8 @@ public class DatanodeRegistration extends DatanodeID
});
}
- public StorageInfo storageInfo;
- public ExportedBlockKeys exportedKeys;
+ private StorageInfo storageInfo;
+ private ExportedBlockKeys exportedKeys;
/**
* Default constructor.
@@ -62,8 +62,8 @@ public DatanodeRegistration() {
/**
* Create DatanodeRegistration
*/
- public DatanodeRegistration(String nodeName) {
- this(nodeName, new StorageInfo(), new ExportedBlockKeys());
+ public DatanodeRegistration(String ipAddr) {
+ this(ipAddr, new StorageInfo(), new ExportedBlockKeys());
}
public DatanodeRegistration(DatanodeID dn, StorageInfo info,
@@ -73,9 +73,9 @@ public DatanodeRegistration(DatanodeID dn, StorageInfo info,
this.exportedKeys = keys;
}
- public DatanodeRegistration(String nodeName, StorageInfo info,
+ public DatanodeRegistration(String ipAddr, StorageInfo info,
ExportedBlockKeys keys) {
- super(nodeName);
+ super(ipAddr);
this.storageInfo = info;
this.exportedKeys = keys;
}
@@ -83,7 +83,19 @@ public DatanodeRegistration(String nodeName, StorageInfo info,
public void setStorageInfo(StorageInfo storage) {
this.storageInfo = new StorageInfo(storage);
}
-
+
+ public StorageInfo getStorageInfo() {
+ return storageInfo;
+ }
+
+ public void setExportedKeys(ExportedBlockKeys keys) {
+ this.exportedKeys = keys;
+ }
+
+ public ExportedBlockKeys getExportedKeys() {
+ return exportedKeys;
+ }
+
@Override // NodeRegistration
public int getVersion() {
return storageInfo.getLayoutVersion();
@@ -96,13 +108,13 @@ public String getRegistrationID() {
@Override // NodeRegistration
public String getAddress() {
- return getName();
+ return getXferAddr();
}
@Override
public String toString() {
return getClass().getSimpleName()
- + "(" + name
+ + "(" + ipAddr
+ ", storageID=" + storageID
+ ", infoPort=" + infoPort
+ ", ipcPort=" + ipcPort
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DisallowedDatanodeException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DisallowedDatanodeException.java
index a22ef5fca0..a68f1c2c8f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DisallowedDatanodeException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DisallowedDatanodeException.java
@@ -38,6 +38,6 @@ public class DisallowedDatanodeException extends IOException {
private static final long serialVersionUID = 1L;
public DisallowedDatanodeException(DatanodeID nodeID) {
- super("Datanode denied communication with namenode: " + nodeID.getName());
+ super("Datanode denied communication with namenode: " + nodeID);
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java
index a33854b883..a684418545 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java
@@ -28,7 +28,7 @@
public interface NodeRegistration {
/**
* Get address of the server node.
- * @return hostname:portNumber
+ * @return ipAddr:portNumber
*/
public String getAddress();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 970f1dc610..d27b664f6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -280,10 +280,11 @@ private static Map<String, Object> toJsonMap(final DatanodeInfo datanodeinfo) {
}
final Map<String, Object> m = new TreeMap<String, Object>();
- m.put("name", datanodeinfo.getName());
+ m.put("ipAddr", datanodeinfo.getIpAddr());
+ m.put("hostName", datanodeinfo.getHostName());
m.put("storageID", datanodeinfo.getStorageID());
+ m.put("xferPort", datanodeinfo.getXferPort());
m.put("infoPort", datanodeinfo.getInfoPort());
-
m.put("ipcPort", datanodeinfo.getIpcPort());
m.put("capacity", datanodeinfo.getCapacity());
@@ -293,7 +294,6 @@ private static Map toJsonMap(final DatanodeInfo datanodeinfo) {
m.put("lastUpdate", datanodeinfo.getLastUpdate());
m.put("xceiverCount", datanodeinfo.getXceiverCount());
m.put("networkLocation", datanodeinfo.getNetworkLocation());
- m.put("hostName", datanodeinfo.getHostName());
m.put("adminState", datanodeinfo.getAdminState().name());
return m;
}
@@ -306,7 +306,9 @@ private static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) {
return new DatanodeInfo(
(String)m.get("name"),
+ (String)m.get("hostName"),
(String)m.get("storageID"),
+ (int)(long)(Long)m.get("xferPort"),
(int)(long)(Long)m.get("infoPort"),
(int)(long)(Long)m.get("ipcPort"),
@@ -317,7 +319,6 @@ private static DatanodeInfo toDatanodeInfo(final Map, ?> m) {
(Long)m.get("lastUpdate"),
(int)(long)(Long)m.get("xceiverCount"),
(String)m.get("networkLocation"),
- (String)m.get("hostName"),
AdminStates.valueOf((String)m.get("adminState")));
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index cc45593b29..a0b055642f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -48,10 +48,12 @@ message BlockTokenIdentifierProto {
* Identifies a Datanode
*/
message DatanodeIDProto {
- required string name = 1; // hostname:portNumber
- required string storageID = 2; // Unique storage id
- required uint32 infoPort = 3; // the port where the infoserver is running
- required uint32 ipcPort = 4; // the port where the ipc Server is running
+ required string ipAddr = 1; // IP address
+ required string hostName = 2; // hostname
+ required string storageID = 3; // unique storage id
+ required uint32 xferPort = 4; // data streaming port
+ required uint32 infoPort = 5; // info server port
+ required uint32 ipcPort = 6; // ipc server port
}
/**
@@ -73,7 +75,6 @@ message DatanodeInfoProto {
optional uint64 lastUpdate = 6 [default = 0];
optional uint32 xceiverCount = 7 [default = 0];
optional string location = 8;
- optional string hostName = 9;
enum AdminState {
NORMAL = 0;
DECOMMISSION_INPROGRESS = 1;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
index a920865f42..80503e67ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
@@ -143,7 +143,7 @@ public BlockReader getBlockReader(LocatedBlock testBlock, int offset, int lenToR
Socket sock = null;
ExtendedBlock block = testBlock.getBlock();
DatanodeInfo[] nodes = testBlock.getLocations();
- targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
+ targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
sock.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
@@ -162,7 +162,7 @@ public BlockReader getBlockReader(LocatedBlock testBlock, int offset, int lenToR
*/
public DataNode getDataNode(LocatedBlock testBlock) {
DatanodeInfo[] nodes = testBlock.getLocations();
- int ipcport = nodes[0].ipcPort;
+ int ipcport = nodes[0].getIpcPort();
return cluster.getDataNode(ipcport);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 7854f95f88..a11b927fce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -339,7 +339,7 @@ public static void waitCorruptReplicas(FileSystem fs, FSNamesystem ns,
}
/*
- * Wait up to 20s for the given DN (host:port) to be decommissioned.
+ * Wait up to 20s for the given DN (IP:port) to be decommissioned
*/
public static void waitForDecommission(FileSystem fs, String name)
throws IOException, InterruptedException, TimeoutException {
@@ -351,7 +351,7 @@ public static void waitForDecommission(FileSystem fs, String name)
Thread.sleep(1000);
DistributedFileSystem dfs = (DistributedFileSystem)fs;
for (DatanodeInfo info : dfs.getDataNodeStats()) {
- if (name.equals(info.getName())) {
+ if (name.equals(info.getXferAddr())) {
dn = info;
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 6717a01dab..35619f8fd4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -1041,9 +1041,9 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
// hadoop.security.token.service.use_ip=true
//since the HDFS does things based on IP:port, we need to add the mapping
//for IP:port to rackId
- String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
+ String ipAddr = dn.getXferAddress().getAddress().getHostAddress();
if (racks != null) {
- int port = dn.getSelfAddr().getPort();
+ int port = dn.getXferAddress().getPort();
LOG.info("Adding node with IP:port : " + ipAddr + ":" + port +
" to rack " + racks[i-curDatanodesNum]);
StaticMapping.addNodeToRack(ipAddr + ":" + port,
@@ -1422,7 +1422,7 @@ public synchronized DataNodeProperties stopDataNode(int i) {
DataNodeProperties dnprop = dataNodes.remove(i);
DataNode dn = dnprop.datanode;
LOG.info("MiniDFSCluster Stopping DataNode " +
- dn.getMachineName() +
+ dn.getDisplayName() +
" from a total of " + (dataNodes.size() + 1) +
" datanodes.");
dn.shutdown();
@@ -1433,16 +1433,13 @@ public synchronized DataNodeProperties stopDataNode(int i) {
/*
* Shutdown a datanode by name.
*/
- public synchronized DataNodeProperties stopDataNode(String name) {
+ public synchronized DataNodeProperties stopDataNode(String dnName) {
int i;
for (i = 0; i < dataNodes.size(); i++) {
DataNode dn = dataNodes.get(i).datanode;
- // get BP registration
- DatanodeRegistration dnR =
- DataNodeTestUtils.getDNRegistrationByMachineName(dn, name);
- LOG.info("for name=" + name + " found bp=" + dnR +
- "; with dnMn=" + dn.getMachineName());
- if(dnR != null) {
+ LOG.info("DN name=" + dnName + " found DN=" + dn +
+ " with name=" + dn.getDisplayName());
+ if (dnName.equals(dn.getDatanodeId().getXferAddr())) {
break;
}
}
@@ -1472,9 +1469,9 @@ public synchronized boolean restartDataNode(DataNodeProperties dnprop,
String[] args = dnprop.dnArgs;
Configuration newconf = new HdfsConfiguration(conf); // save cloned config
if (keepPort) {
- InetSocketAddress addr = dnprop.datanode.getSelfAddr();
- conf.set(DFS_DATANODE_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":"
- + addr.getPort());
+ InetSocketAddress addr = dnprop.datanode.getXferAddress();
+ conf.set(DFS_DATANODE_ADDRESS_KEY,
+ addr.getAddress().getHostAddress() + ":" + addr.getPort());
}
dataNodes.add(new DataNodeProperties(DataNode.createDataNode(args, conf),
newconf, args));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
index dea2ba0ba3..327dd7c7ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
@@ -220,7 +220,7 @@ private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
corruptBlock(block, dn);
LOG.debug("Corrupted block " + block.getBlockName() + " on data node "
- + dninfo.getName());
+ + dninfo);
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
index 6673bf547b..335734d5b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
@@ -158,7 +158,7 @@ public void testSocketCache() throws IOException {
testFile.toString(), 0, FILE_SIZE)
.getLocatedBlocks().get(0);
DataNode dn = util.getDataNode(block);
- InetSocketAddress dnAddr = dn.getSelfAddr();
+ InetSocketAddress dnAddr = dn.getXferAddress();
// Make some sockets to the DN
Socket[] dnSockets = new Socket[CACHE_SIZE];
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
index 4d614b8d18..d21592e485 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
@@ -50,7 +50,7 @@ public void testDFSAddressConfig() throws IOException {
ArrayList<DataNode> dns = cluster.getDataNodes();
DataNode dn = dns.get(0);
- String selfSocketAddr = dn.getSelfAddr().toString();
+ String selfSocketAddr = dn.getXferAddress().toString();
System.out.println("DN Self Socket Addr == " + selfSocketAddr);
assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
@@ -75,7 +75,7 @@ public void testDFSAddressConfig() throws IOException {
dns = cluster.getDataNodes();
dn = dns.get(0);
- selfSocketAddr = dn.getSelfAddr().toString();
+ selfSocketAddr = dn.getXferAddress().toString();
System.out.println("DN Self Socket Addr == " + selfSocketAddr);
// assert that default self socket address is 127.0.0.1
assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
@@ -100,7 +100,7 @@ public void testDFSAddressConfig() throws IOException {
dns = cluster.getDataNodes();
dn = dns.get(0);
- selfSocketAddr = dn.getSelfAddr().toString();
+ selfSocketAddr = dn.getXferAddress().toString();
System.out.println("DN Self Socket Addr == " + selfSocketAddr);
// assert that default self socket address is 0.0.0.0
assertTrue(selfSocketAddr.contains("/0.0.0.0:"));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 1e39b9a40d..f3b980f5fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -334,7 +334,7 @@ private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) {
LocatedBlock badLocatedBlock = new LocatedBlock(
goodLocatedBlock.getBlock(),
new DatanodeInfo[] {
- new DatanodeInfo(new DatanodeID("255.255.255.255:234"))
+ new DatanodeInfo(new DatanodeID("255.255.255.255", 234))
},
goodLocatedBlock.getStartOffset(),
false);
@@ -608,7 +608,7 @@ public void testGetFileChecksum() throws Exception {
cluster.getNameNodeRpc(), f, 0, Long.MAX_VALUE)
.getLocatedBlocks();
final DatanodeInfo first = locatedblocks.get(0).getLocations()[0];
- cluster.stopDataNode(first.getName());
+ cluster.stopDataNode(first.getXferAddr());
//get checksum again
final FileChecksum cs2 = fs.getFileChecksum(p);
@@ -629,7 +629,7 @@ public void testClientDNProtocolTimeout() throws IOException {
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
DatanodeID fakeDnId = new DatanodeID(
- "localhost:" + addr.getPort(), "fake-storage", 0, addr.getPort());
+ "localhost", "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
index 567fbabddd..ba92c569d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
@@ -52,7 +52,6 @@ public class TestDFSUpgradeFromImage extends TestCase {
.getLog(TestDFSUpgradeFromImage.class);
private static File TEST_ROOT_DIR =
new File(MiniDFSCluster.getBaseDirectory());
- private static final String HADOOP14_IMAGE = "hadoop-14-dfs-dir.tgz";
private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt";
private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz";
@@ -68,10 +67,6 @@ private static class ReferenceFileInfo {
boolean printChecksum = false;
- public void unpackStorage() throws IOException {
- unpackStorage(HADOOP14_IMAGE);
- }
-
private void unpackStorage(String tarFileName)
throws IOException {
String tarFile = System.getProperty("test.cache.data", "build/test/cache")
@@ -227,14 +222,6 @@ public void testFailOnPreUpgradeImage() throws IOException {
}
}
- /**
- * Test upgrade from an 0.14 image
- */
- public void testUpgradeFromRel14Image() throws IOException {
- unpackStorage();
- upgradeAndVerify();
- }
-
/**
* Test upgrade from 0.22 image
*/
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
index af0bf6a19d..71ad9afa95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
@@ -128,8 +128,7 @@ private void sendRecvData(String testDescription,
if (eofExpected) {
throw new IOException("Did not recieve IOException when an exception " +
- "is expected while reading from " +
- datanode.getName());
+ "is expected while reading from " + datanode);
}
byte[] needed = recvBuf.toByteArray();
@@ -215,7 +214,7 @@ private void testWrite(ExtendedBlock block, BlockConstructionStage stage, long n
String poolId = cluster.getNamesystem().getBlockPoolId();
datanode = DataNodeTestUtils.getDNRegistrationForBP(
cluster.getDataNodes().get(0), poolId);
- dnAddr = NetUtils.createSocketAddr(datanode.getName());
+ dnAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
FileSystem fileSys = cluster.getFileSystem();
/* Test writing to finalized replicas */
@@ -349,7 +348,7 @@ private void testWrite(ExtendedBlock block, BlockConstructionStage stage, long n
new InetSocketAddress("localhost", cluster.getNameNodePort()),
conf);
datanode = dfsClient.datanodeReport(DatanodeReportType.LIVE)[0];
- dnAddr = NetUtils.createSocketAddr(datanode.getName());
+ dnAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
FileSystem fileSys = cluster.getFileSystem();
int fileLen = Math.min(conf.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096), 4096);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
index 288d432d84..c1b775939b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
@@ -269,7 +269,7 @@ private void blockCorruptionRecoveryPolicy(int numDataNodes,
if (corruptReplica(block, i)) {
corruptReplicasDNIDs[j++] = i;
LOG.info("successfully corrupted block " + block + " on node "
- + i + " " + cluster.getDataNodes().get(i).getSelfAddr());
+ + i + " " + cluster.getDataNodes().get(i).getDisplayName());
}
}
@@ -281,7 +281,7 @@ private void blockCorruptionRecoveryPolicy(int numDataNodes,
for (int i = numCorruptReplicas - 1; i >= 0 ; i--) {
LOG.info("restarting node with corrupt replica: position "
+ i + " node " + corruptReplicasDNIDs[i] + " "
- + cluster.getDataNodes().get(corruptReplicasDNIDs[i]).getSelfAddr());
+ + cluster.getDataNodes().get(corruptReplicasDNIDs[i]).getDisplayName());
cluster.restartDataNode(corruptReplicasDNIDs[i]);
}
@@ -343,7 +343,7 @@ public void testTruncatedBlockReport() throws Exception {
if (!changeReplicaLength(block, 0, -1)) {
throw new IOException(
"failed to find or change length of replica on node 0 "
- + cluster.getDataNodes().get(0).getSelfAddr());
+ + cluster.getDataNodes().get(0).getDisplayName());
}
} finally {
cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
index e271bb95a8..fbe98dce5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
@@ -389,9 +389,8 @@ private void simpleTest(int datanodeToKill) throws IOException {
cluster.stopDataNode(victim);
} else {
int victim = datanodeToKill;
- System.out.println("SimpleTest stopping datanode " +
- targets[victim].getName());
- cluster.stopDataNode(targets[victim].getName());
+ System.out.println("SimpleTest stopping datanode " + targets[victim]);
+ cluster.stopDataNode(targets[victim].getXferAddr());
}
System.out.println("SimpleTest stopping datanode complete");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 6997ebc2e7..877ad1841c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -151,27 +151,27 @@ private String checkFile(FileSystem fileSys, Path name, int repl,
int hasdown = 0;
DatanodeInfo[] nodes = blk.getLocations();
for (int j = 0; j < nodes.length; j++) { // for each replica
- if (isNodeDown && nodes[j].getName().equals(downnode)) {
+ if (isNodeDown && nodes[j].getXferAddr().equals(downnode)) {
hasdown++;
//Downnode must actually be decommissioned
if (!nodes[j].isDecommissioned()) {
return "For block " + blk.getBlock() + " replica on " +
- nodes[j].getName() + " is given as downnode, " +
+ nodes[j] + " is given as downnode, " +
"but is not decommissioned";
}
//Decommissioned node (if any) should only be last node in list.
if (j != nodes.length - 1) {
return "For block " + blk.getBlock() + " decommissioned node "
- + nodes[j].getName() + " was not last node in list: "
+ + nodes[j] + " was not last node in list: "
+ (j + 1) + " of " + nodes.length;
}
LOG.info("Block " + blk.getBlock() + " replica on " +
- nodes[j].getName() + " is decommissioned.");
+ nodes[j] + " is decommissioned.");
} else {
//Non-downnodes must not be decommissioned
if (nodes[j].isDecommissioned()) {
return "For block " + blk.getBlock() + " replica on " +
- nodes[j].getName() + " is unexpectedly decommissioned";
+ nodes[j] + " is unexpectedly decommissioned";
}
}
}
@@ -215,7 +215,7 @@ private DatanodeInfo decommissionNode(int nnIndex,
found = true;
}
}
- String nodename = info[index].getName();
+ String nodename = info[index].getXferAddr();
LOG.info("Decommissioning node: " + nodename);
// write nodename into the exclude file.
@@ -236,7 +236,7 @@ private DatanodeInfo decommissionNode(int nnIndex,
/* stop decommission of the datanode and wait for each to reach the NORMAL state */
private void recomissionNode(DatanodeInfo decommissionedNode) throws IOException {
- LOG.info("Recommissioning node: " + decommissionedNode.getName());
+ LOG.info("Recommissioning node: " + decommissionedNode);
writeConfigFile(excludeFile, null);
refreshNodes(cluster.getNamesystem(), conf);
waitNodeState(decommissionedNode, AdminStates.NORMAL);
@@ -373,7 +373,7 @@ private void testDecommission(int numNamenodes, int numDatanodes)
DFSClient client = getDfsClient(cluster.getNameNode(i), conf);
assertEquals("All datanodes must be alive", numDatanodes,
client.datanodeReport(DatanodeReportType.LIVE).length);
- assertNull(checkFile(fileSys, file1, replicas, decomNode.getName(), numDatanodes));
+ assertNull(checkFile(fileSys, file1, replicas, decomNode.getXferAddr(), numDatanodes));
cleanupFile(fileSys, file1);
}
}
@@ -414,7 +414,7 @@ private void testRecommission(int numNamenodes, int numDatanodes)
DFSClient client = getDfsClient(cluster.getNameNode(i), conf);
assertEquals("All datanodes must be alive", numDatanodes,
client.datanodeReport(DatanodeReportType.LIVE).length);
- assertNull(checkFile(fileSys, file1, replicas, decomNode.getName(), numDatanodes));
+ assertNull(checkFile(fileSys, file1, replicas, decomNode.getXferAddr(), numDatanodes));
// stop decommission and check if the new replicas are removed
recomissionNode(decomNode);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index a9000ed5fc..87848f33a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -844,7 +844,7 @@ public void testLeaseExpireHardLimit() throws Exception {
LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
int successcount = 0;
for(DatanodeInfo datanodeinfo: locatedblock.getLocations()) {
- DataNode datanode = cluster.getDataNode(datanodeinfo.ipcPort);
+ DataNode datanode = cluster.getDataNode(datanodeinfo.getIpcPort());
ExtendedBlock blk = locatedblock.getBlock();
Block b = DataNodeTestUtils.getFSDataset(datanode).getStoredBlock(
blk.getBlockPoolId(), blk.getBlockId());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
index 3b617c71d5..7e2630ec30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
@@ -147,7 +147,7 @@ private void testDataNodeRedirect(Path path) throws IOException {
// if we were redirected to the right DN.
BlockLocation[] locations =
hdfs.getFileBlockLocations(path, 0, 10);
- String locationName = locations[0].getNames()[0];
+ String xferAddr = locations[0].getNames()[0];
// Connect to the NN to get redirected
URL u = hftpFs.getNamenodeURL(
@@ -164,7 +164,7 @@ private void testDataNodeRedirect(Path path) throws IOException {
for (DataNode node : cluster.getDataNodes()) {
DatanodeRegistration dnR =
DataNodeTestUtils.getDNRegistrationForBP(node, blockPoolId);
- if (dnR.getName().equals(locationName)) {
+ if (dnR.getXferAddr().equals(xferAddr)) {
checked = true;
assertEquals(dnR.getInfoPort(), conn.getURL().getPort());
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
index 3e90665590..65a0465bd4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
@@ -32,6 +32,7 @@
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.RefreshAuthorizationPolicyProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.RefreshUserMappingsProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.net.NetUtils;
@@ -58,8 +59,9 @@ public static void setUp() throws Exception {
cluster = (new MiniDFSCluster.Builder(conf))
.numDataNodes(1).build();
nnAddress = cluster.getNameNode().getNameNodeAddress();
- dnAddress = new InetSocketAddress(cluster.getDataNodes().get(0)
- .getDatanodeId().getHost(), cluster.getDataNodes().get(0).getIpcPort());
+ DataNode dn = cluster.getDataNodes().get(0);
+ dnAddress = new InetSocketAddress(dn.getDatanodeId().getIpAddr(),
+ dn.getIpcPort());
}
@AfterClass
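
The DataNode's IPC endpoint is now assembled from two separate accessors instead of the old getHost() string; a short sketch using the names from the hunk above:

    // Build the DN's IPC socket address from its IP address and IPC port.
    DataNode dn = cluster.getDataNodes().get(0);
    InetSocketAddress dnAddress =
        new InetSocketAddress(dn.getDatanodeId().getIpAddr(), dn.getIpcPort());
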
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
index e211d20977..c05ccee7ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
@@ -117,7 +117,7 @@ private void checkFile(FileSystem fileSys, Path name, int repl)
isOnSameRack = false;
isNotOnSameRack = false;
for (int i = 0; i < datanodes.length-1; i++) {
- LOG.info("datanode "+ i + ": "+ datanodes[i].getName());
+ LOG.info("datanode "+ i + ": "+ datanodes[i]);
boolean onRack = false;
for( int j=i+1; j expected,
@Test
public void testConvertLocatedBlock() {
DatanodeInfo [] dnInfos = new DatanodeInfo[3];
- dnInfos[0] = new DatanodeInfo("host0", "0", 5000, 5001, 20000, 10001, 9999,
- 59, 69, 32, "local", "host0", AdminStates.DECOMMISSION_INPROGRESS);
- dnInfos[1] = new DatanodeInfo("host1", "1", 5000, 5001, 20000, 10001, 9999,
- 59, 69, 32, "local", "host1", AdminStates.DECOMMISSIONED);
- dnInfos[2] = new DatanodeInfo("host2", "2", 5000, 5001, 20000, 10001, 9999,
- 59, 69, 32, "local", "host1", AdminStates.NORMAL);
+ dnInfos[0] = new DatanodeInfo("host0", "host0", "0", 5000, 5001, 5002, 20000, 10001, 9999,
+ 59, 69, 32, "local", AdminStates.DECOMMISSION_INPROGRESS);
+ dnInfos[1] = new DatanodeInfo("host1", "host1", "1", 5000, 5001, 5002, 20000, 10001, 9999,
+ 59, 69, 32, "local", AdminStates.DECOMMISSIONED);
+ dnInfos[2] = new DatanodeInfo("host2", "host2", "2", 5000, 5001, 5002, 20000, 10001, 9999,
+ 59, 69, 32, "local", AdminStates.NORMAL);
LocatedBlock lb = new LocatedBlock(
new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
LocatedBlockProto lbProto = PBHelper.convert(lb);
@@ -423,7 +423,7 @@ public void testConvertLocatedBlock() {
@Test
public void testConvertDatanodeRegistration() {
- DatanodeID dnId = new DatanodeID("host", "xyz", 1, 0);
+ DatanodeID dnId = new DatanodeID("host", "host", "xyz", 0, 1, 0);
BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
getBlockKey(1), keys);
@@ -431,8 +431,8 @@ public void testConvertDatanodeRegistration() {
new StorageInfo(), expKeys);
DatanodeRegistrationProto proto = PBHelper.convert(reg);
DatanodeRegistration reg2 = PBHelper.convert(proto);
- compare(reg.storageInfo, reg2.storageInfo);
- compare(reg.exportedKeys, reg2.exportedKeys);
+ compare(reg.getStorageInfo(), reg2.getStorageInfo());
+ compare(reg.getExportedKeys(), reg2.getExportedKeys());
compare((DatanodeID)reg, (DatanodeID)reg2);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
index 01725b1bce..ea335d2612 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
@@ -279,8 +279,8 @@ public void testBlockTokenRpcLeak() throws Exception {
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
- DatanodeID fakeDnId = new DatanodeID("localhost:" + addr.getPort(),
- "fake-storage", 0, addr.getPort());
+ DatanodeID fakeDnId = new DatanodeID("localhost",
+ "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
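
Tests that build fake DatanodeIDs switch from the old four-argument constructor to a six-argument one; judging from the call sites in this patch the order appears to be (ipAddr, hostName, storageID, xferPort, infoPort, ipcPort). A hedged sketch, where port stands for addr.getPort() in the test above:

    // Old form packed "host:port" into a single name string:
    //   new DatanodeID("localhost:" + port, "fake-storage", 0, port)
    // New form, argument order inferred from the call sites in this patch:
    int port = addr.getPort();
    DatanodeID fakeDnId =
        new DatanodeID("localhost", "localhost", "fake-storage", port, 0, port);
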
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
index 1ec75112f8..41dbf1368c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
@@ -165,7 +165,7 @@ public static void noticeDeadDatanode(NameNode nn, String dnName) {
DatanodeDescriptor[] dnds = hbm.getDatanodes();
DatanodeDescriptor theDND = null;
for (DatanodeDescriptor dnd : dnds) {
- if (dnd.getName().equals(dnName)) {
+ if (dnd.getXferAddr().equals(dnName)) {
theDND = dnd;
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index dbecfe7f78..0be519dd46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -48,12 +48,12 @@
public class TestBlockManager {
private final List<DatanodeDescriptor> nodes = ImmutableList.of(
- new DatanodeDescriptor(new DatanodeID("h1:5020"), "/rackA"),
- new DatanodeDescriptor(new DatanodeID("h2:5020"), "/rackA"),
- new DatanodeDescriptor(new DatanodeID("h3:5020"), "/rackA"),
- new DatanodeDescriptor(new DatanodeID("h4:5020"), "/rackB"),
- new DatanodeDescriptor(new DatanodeID("h5:5020"), "/rackB"),
- new DatanodeDescriptor(new DatanodeID("h6:5020"), "/rackB")
+ new DatanodeDescriptor(new DatanodeID("h1", 5020), "/rackA"),
+ new DatanodeDescriptor(new DatanodeID("h2", 5020), "/rackA"),
+ new DatanodeDescriptor(new DatanodeID("h3", 5020), "/rackA"),
+ new DatanodeDescriptor(new DatanodeID("h4", 5020), "/rackB"),
+ new DatanodeDescriptor(new DatanodeID("h5", 5020), "/rackB"),
+ new DatanodeDescriptor(new DatanodeID("h6", 5020), "/rackB")
);
private final List<DatanodeDescriptor> rackA = nodes.subList(0, 3);
private final List<DatanodeDescriptor> rackB = nodes.subList(3, 6);
@@ -272,7 +272,7 @@ private void doTestOneOfTwoRacksDecommissioned(int testIndex) throws Exception {
// the block is still under-replicated. Add a new node. This should allow
// the third off-rack replica.
- DatanodeDescriptor rackCNode = new DatanodeDescriptor(new DatanodeID("h7:5020"), "/rackC");
+ DatanodeDescriptor rackCNode = new DatanodeDescriptor(new DatanodeID("h7", 100), "/rackC");
addNodes(ImmutableList.of(rackCNode));
try {
DatanodeDescriptor[] pipeline2 = scheduleSingleReplication(blockInfo);
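
Where a test only needs a host and a transfer port, the updated code uses a two-argument convenience constructor; from the usage above it appears to take (ipAddr, xferPort), with the remaining fields defaulted. Sketch (port value illustrative, rack string as in the hunk):

    // Shorthand DatanodeID used throughout the block-management tests.
    DatanodeDescriptor rackCNode =
        new DatanodeDescriptor(new DatanodeID("h7", 5020), "/rackC");
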
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
index 0a25ef7983..c9436e4f33 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
@@ -137,7 +137,7 @@ private static void tryRead(Configuration conf, LocatedBlock lblock,
ExtendedBlock block = lblock.getBlock();
try {
DatanodeInfo[] nodes = lblock.getLocations();
- targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
+ targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
s = NetUtils.getDefaultSocketFactory(conf).createSocket();
s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
index d34cf1c4c6..08607093db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
@@ -28,13 +28,13 @@
public class TestHost2NodesMap {
private Host2NodesMap map = new Host2NodesMap();
private final DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
- new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"),
- new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"),
- new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"),
- new DatanodeDescriptor(new DatanodeID("h3:5030"), "/d1/r2"),
+ new DatanodeDescriptor(new DatanodeID("ip1", "h1", "", 5020, -1, -1), "/d1/r1"),
+ new DatanodeDescriptor(new DatanodeID("ip2", "h1", "", 5020, -1, -1), "/d1/r1"),
+ new DatanodeDescriptor(new DatanodeID("ip3", "h1", "", 5020, -1, -1), "/d1/r2"),
+ new DatanodeDescriptor(new DatanodeID("ip3", "h1", "", 5030, -1, -1), "/d1/r2"),
};
private final DatanodeDescriptor NULL_NODE = null;
- private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3:5040"),
+ private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3", 5040),
"/d1/r4");
@Before
@@ -56,24 +56,11 @@ public void testContains() throws Exception {
@Test
public void testGetDatanodeByHost() throws Exception {
- assertTrue(map.getDatanodeByHost("h1")==dataNodes[0]);
- assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
- DatanodeDescriptor node = map.getDatanodeByHost("h3");
+ assertTrue(map.getDatanodeByHost("ip1")==dataNodes[0]);
+ assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
+ DatanodeDescriptor node = map.getDatanodeByHost("ip3");
assertTrue(node==dataNodes[2] || node==dataNodes[3]);
- assertTrue(null==map.getDatanodeByHost("h4"));
- }
-
- @Test
- public void testGetDatanodeByName() throws Exception {
- assertTrue(map.getDatanodeByName("h1:5020")==dataNodes[0]);
- assertTrue(map.getDatanodeByName("h1:5030")==null);
- assertTrue(map.getDatanodeByName("h2:5020")==dataNodes[1]);
- assertTrue(map.getDatanodeByName("h2:5030")==null);
- assertTrue(map.getDatanodeByName("h3:5020")==dataNodes[2]);
- assertTrue(map.getDatanodeByName("h3:5030")==dataNodes[3]);
- assertTrue(map.getDatanodeByName("h3:5040")==null);
- assertTrue(map.getDatanodeByName("h4")==null);
- assertTrue(map.getDatanodeByName(null)==null);
+ assertTrue(null==map.getDatanodeByHost("ip4"));
}
@Test
@@ -81,21 +68,21 @@ public void testRemove() throws Exception {
assertFalse(map.remove(NODE));
assertTrue(map.remove(dataNodes[0]));
- assertTrue(map.getDatanodeByHost("h1")==null);
- assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
- DatanodeDescriptor node = map.getDatanodeByHost("h3");
+ assertTrue(map.getDatanodeByHost("ip1")==null);
+ assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
+ DatanodeDescriptor node = map.getDatanodeByHost("ip3");
assertTrue(node==dataNodes[2] || node==dataNodes[3]);
- assertTrue(null==map.getDatanodeByHost("h4"));
+ assertTrue(null==map.getDatanodeByHost("ip4"));
assertTrue(map.remove(dataNodes[2]));
- assertTrue(map.getDatanodeByHost("h1")==null);
- assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
- assertTrue(map.getDatanodeByHost("h3")==dataNodes[3]);
+ assertTrue(map.getDatanodeByHost("ip1")==null);
+ assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
+ assertTrue(map.getDatanodeByHost("ip3")==dataNodes[3]);
assertTrue(map.remove(dataNodes[3]));
- assertTrue(map.getDatanodeByHost("h1")==null);
- assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
- assertTrue(map.getDatanodeByHost("h3")==null);
+ assertTrue(map.getDatanodeByHost("ip1")==null);
+ assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
+ assertTrue(map.getDatanodeByHost("ip3")==null);
assertFalse(map.remove(NULL_NODE));
assertTrue(map.remove(dataNodes[1]));
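
The Host2NodesMap assertions change because the map is now keyed by the datanode's IP address, which is why the hostname-style keys ("h1", "h2") become "ip1", "ip2" and the xfer-address-based getDatanodeByName() checks are dropped. A small sketch of the new lookup, assuming Host2NodesMap.add() registers a descriptor under its IP:

    Host2NodesMap map = new Host2NodesMap();
    DatanodeDescriptor d1 =
        new DatanodeDescriptor(new DatanodeID("ip1", "h1", "", 5020, -1, -1), "/d1/r1");
    map.add(d1);
    assert map.getDatanodeByHost("ip1") == d1;   // lookups go by IP address
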
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java
index d47f110344..2c92b66bb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java
@@ -78,11 +78,11 @@ public void testNodeCount() throws Exception {
// bring down first datanode
DatanodeDescriptor datanode = datanodes[0];
- DataNodeProperties dnprop = cluster.stopDataNode(datanode.getName());
+ DataNodeProperties dnprop = cluster.stopDataNode(datanode.getXferAddr());
// make sure that NN detects that the datanode is down
BlockManagerTestUtil.noticeDeadDatanode(
- cluster.getNameNode(), datanode.getName());
+ cluster.getNameNode(), datanode.getXferAddr());
// the block will be replicated
DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
@@ -112,10 +112,10 @@ public void testNodeCount() throws Exception {
assertTrue(nonExcessDN!=null);
// bring down non excessive datanode
- dnprop = cluster.stopDataNode(nonExcessDN.getName());
+ dnprop = cluster.stopDataNode(nonExcessDN.getXferAddr());
// make sure that NN detects that the datanode is down
BlockManagerTestUtil.noticeDeadDatanode(
- cluster.getNameNode(), nonExcessDN.getName());
+ cluster.getNameNode(), nonExcessDN.getXferAddr());
// The block should be replicated
initializeTimeout(TIMEOUT);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
index f7a5c0e065..cd4dfb94a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
@@ -91,9 +91,9 @@ public void testProcesOverReplicateBlock() throws IOException {
synchronized(hm) {
// set live datanode's remaining space to be 0
// so they will be chosen to be deleted when over-replication occurs
- String corruptMachineName = corruptDataNode.getName();
+ String corruptMachineName = corruptDataNode.getXferAddr();
for (DatanodeDescriptor datanode : hm.getDatanodes()) {
- if (!corruptMachineName.equals(datanode.getName())) {
+ if (!corruptMachineName.equals(datanode.getXferAddr())) {
datanode.updateHeartbeat(100L, 100L, 0L, 100L, 0, 0);
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java
index 16977bb820..f73245860a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java
@@ -40,7 +40,7 @@ public class TestPendingDataNodeMessages {
private final Block block2Gs1 = new Block(2, 0, 1);
private final DatanodeDescriptor fakeDN = new DatanodeDescriptor(
- new DatanodeID("fake"));
+ new DatanodeID("fake", 100));
@Test
public void testQueues() {
@@ -56,8 +56,8 @@ public void testQueues() {
Queue<ReportedBlockInfo> q =
msgs.takeBlockQueue(block1Gs2DifferentInstance);
assertEquals(
- "ReportedBlockInfo [block=blk_1_1, dn=fake, reportedState=FINALIZED]," +
- "ReportedBlockInfo [block=blk_1_2, dn=fake, reportedState=FINALIZED]",
+ "ReportedBlockInfo [block=blk_1_1, dn=fake:100, reportedState=FINALIZED]," +
+ "ReportedBlockInfo [block=blk_1_2, dn=fake:100, reportedState=FINALIZED]",
Joiner.on(",").join(q));
assertEquals(0, msgs.count());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index f5926281ee..49925ab885 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -52,16 +52,16 @@ public class TestReplicationPolicy {
private static final String filename = "/dummyfile.txt";
private static final DatanodeDescriptor dataNodes[] =
new DatanodeDescriptor[] {
- new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"),
- new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"),
- new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"),
- new DatanodeDescriptor(new DatanodeID("h4:5020"), "/d1/r2"),
- new DatanodeDescriptor(new DatanodeID("h5:5020"), "/d2/r3"),
- new DatanodeDescriptor(new DatanodeID("h6:5020"), "/d2/r3")
+ new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"),
+ new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"),
+ new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1/r2"),
+ new DatanodeDescriptor(new DatanodeID("h4", 5020), "/d1/r2"),
+ new DatanodeDescriptor(new DatanodeID("h5", 5020), "/d2/r3"),
+ new DatanodeDescriptor(new DatanodeID("h6", 5020), "/d2/r3")
};
private final static DatanodeDescriptor NODE =
- new DatanodeDescriptor(new DatanodeID("h7:5020"), "/d2/r4");
+ new DatanodeDescriptor(new DatanodeID("h7", 5020), "/d2/r4");
static {
try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java
deleted file mode 100644
index 25dce520a6..0000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java
+++ /dev/null
@@ -1,267 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements. See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership. The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License. You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-package org.apache.hadoop.hdfs.server.common;
-
-import static org.apache.hadoop.hdfs.protocol.HdfsConstants.LAYOUT_VERSION;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.TestDFSUpgradeFromImage;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
-import org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode;
-import org.apache.hadoop.hdfs.server.namenode.UpgradeObjectNamenode;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-import org.apache.hadoop.hdfs.tools.DFSAdmin;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import org.junit.Test;
-import static org.junit.Assert.*;
-
-/**
- */
-public class TestDistributedUpgrade {
- private static final Log LOG = LogFactory.getLog(TestDistributedUpgrade.class);
- private Configuration conf;
- private int testCounter = 0;
- private MiniDFSCluster cluster = null;
- private String clusterId = "testClsterId";
-
- /**
- * Writes an INFO log message containing the parameters.
- */
- void log(String label, int numDirs) {
- LOG.info("============================================================");
- LOG.info("***TEST " + (testCounter++) + "*** "
- + label + ":"
- + " numDirs="+numDirs);
- }
-
- /**
- * Attempts to start a NameNode with the given operation. Starting
- * the NameNode should throw an exception.
- */
- void startNameNodeShouldFail(StartupOption operation,
- String exceptionSubstring) {
- try {
- //cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).startupOption(operation).build(); // should fail
- // we set manage dirs to true as NN has to start from untar'ed image with
- // nn dirs set to name1 and name2
- cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
- .format(false)
- .clusterId(clusterId)
- .startupOption(operation)
- .build(); // should fail
- throw new AssertionError("NameNode should have failed to start");
- } catch (Exception expected) {
- GenericTestUtils.assertExceptionContains(
- exceptionSubstring, expected);
- }
- }
-
- /**
- * Attempts to start a DataNode with the given operation. Starting
- * the DataNode should throw an exception.
- */
- void startDataNodeShouldFail(StartupOption operation) {
- try {
- cluster.startDataNodes(conf, 1, false, operation, null); // should fail
- throw new AssertionError("DataNode should have failed to start");
- } catch (Exception expected) {
- // expected
- assertFalse(cluster.isDataNodeUp());
- }
- }
-
- /**
- */
- @Test(timeout=300000) // 5 min timeout
- public void testDistributedUpgrade() throws Exception {
- int numDirs = 1;
- TestDFSUpgradeFromImage testImg = new TestDFSUpgradeFromImage();
- testImg.unpackStorage();
- int numDNs = testImg.numDataNodes;
-
- // register new upgrade objects (ignore all existing)
- UpgradeObjectCollection.initialize();
- UpgradeObjectCollection.registerUpgrade(new UO_Datanode1());
- UpgradeObjectCollection.registerUpgrade(new UO_Namenode1());
- UpgradeObjectCollection.registerUpgrade(new UO_Datanode2());
- UpgradeObjectCollection.registerUpgrade(new UO_Namenode2());
- UpgradeObjectCollection.registerUpgrade(new UO_Datanode3());
- UpgradeObjectCollection.registerUpgrade(new UO_Namenode3());
-
- conf = new HdfsConfiguration();
- if (System.getProperty("test.build.data") == null) { // to test to be run outside of ant
- System.setProperty("test.build.data", "build/test/data");
- }
- conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // block scanning off
-
- log("NameNode start in regular mode when dustributed upgrade is required", numDirs);
- startNameNodeShouldFail(StartupOption.REGULAR, "contains an old layout version");
-
- log("Start NameNode only distributed upgrade", numDirs);
- // cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false)
- // .startupOption(StartupOption.UPGRADE).build();
- cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
- .format(false)
- .clusterId(clusterId)
- .startupOption(StartupOption.UPGRADE)
- .build();
- cluster.shutdown();
-
- log("NameNode start in regular mode when dustributed upgrade has been started", numDirs);
- startNameNodeShouldFail(StartupOption.REGULAR,
- "Previous distributed upgrade was not completed");
-
- log("NameNode rollback to the old version that require a dustributed upgrade", numDirs);
- startNameNodeShouldFail(StartupOption.ROLLBACK,
- "Cannot rollback to storage version -7 using this version");
-
- log("Normal distributed upgrade for the cluster", numDirs);
- cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(numDNs)
- .format(false)
- .clusterId(clusterId)
- .startupOption(StartupOption.UPGRADE)
- .build();
- DFSAdmin dfsAdmin = new DFSAdmin();
- dfsAdmin.setConf(conf);
- dfsAdmin.run(new String[] {"-safemode", "wait"});
- cluster.shutdown();
-
- // it should be ok to start in regular mode
- log("NameCluster regular startup after the upgrade", numDirs);
- cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(numDNs)
- .clusterId(clusterId)
- .format(false)
- .startupOption(StartupOption.REGULAR)
- .build();
-
- cluster.waitActive();
- cluster.shutdown();
- }
-
- public static void main(String[] args) throws Exception {
- new TestDistributedUpgrade().testDistributedUpgrade();
- LOG.info("=== DONE ===");
- }
-}
-
-/**
- * Upgrade object for data-node
- */
-class UO_Datanode extends UpgradeObjectDatanode {
- int version;
-
- UO_Datanode(int v) {
- this.status = (short)0;
- version = v;
- }
-
- public int getVersion() {
- return version;
- }
-
- public void doUpgrade() throws IOException {
- this.status = (short)100;
- DatanodeProtocol nn = getNamenode();
- nn.processUpgradeCommand(
- new UpgradeCommand(UpgradeCommand.UC_ACTION_REPORT_STATUS,
- getVersion(), getUpgradeStatus()));
- }
-
- public UpgradeCommand startUpgrade() throws IOException {
- return null;
- }
-}
-
-/**
- * Upgrade object for name-node
- */
-class UO_Namenode extends UpgradeObjectNamenode {
- int version;
-
- UO_Namenode(int v) {
- status = (short)0;
- version = v;
- }
-
- public int getVersion() {
- return version;
- }
-
- synchronized public UpgradeCommand processUpgradeCommand(
- UpgradeCommand command) throws IOException {
- switch(command.getAction()) {
- case UpgradeCommand.UC_ACTION_REPORT_STATUS:
- this.status += command.getCurrentStatus()/8; // 4 reports needed
- break;
- default:
- this.status++;
- }
- return null;
- }
-
- public UpgradeCommand completeUpgrade() throws IOException {
- return null;
- }
-}
-
-class UO_Datanode1 extends UO_Datanode {
- UO_Datanode1() {
- super(LAYOUT_VERSION+1);
- }
-}
-
-class UO_Namenode1 extends UO_Namenode {
- UO_Namenode1() {
- super(LAYOUT_VERSION+1);
- }
-}
-
-class UO_Datanode2 extends UO_Datanode {
- UO_Datanode2() {
- super(LAYOUT_VERSION+2);
- }
-}
-
-class UO_Namenode2 extends UO_Namenode {
- UO_Namenode2() {
- super(LAYOUT_VERSION+2);
- }
-}
-
-class UO_Datanode3 extends UO_Datanode {
- UO_Datanode3() {
- super(LAYOUT_VERSION+3);
- }
-}
-
-class UO_Namenode3 extends UO_Namenode {
- UO_Namenode3() {
- super(LAYOUT_VERSION+3);
- }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
index 726c5d3ce3..f05bf653c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
@@ -36,12 +36,7 @@
* Utility class for accessing package-private DataNode information during tests.
*
*/
-public class DataNodeTestUtils {
- public static DatanodeRegistration
- getDNRegistrationByMachineName(DataNode dn, String mName) {
- return dn.getDNRegistrationByMachineName(mName);
- }
-
+public class DataNodeTestUtils {
public static DatanodeRegistration
getDNRegistrationForBP(DataNode dn, String bpid) throws IOException {
return dn.getDNRegistrationForBP(bpid);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index 8d9ee07ea0..e69b1c3021 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -383,7 +383,7 @@ private SimulatedBPStorage getBPStorage(String bpid) throws IOException {
public SimulatedFSDataset(DataNode datanode, DataStorage storage,
Configuration conf) {
if (storage != null) {
- storage.createStorageID(datanode.getPort());
+ storage.createStorageID(datanode.getXferPort());
this.storageId = storage.getStorageID();
} else {
this.storageId = "unknownStorageId" + new Random().nextInt();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
index 7531f8e3d4..d575d44efd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
@@ -197,9 +197,9 @@ private void testSyncReplicas(ReplicaRecoveryInfo replica1,
locs, RECOVERY_ID);
ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
BlockRecord record1 = new BlockRecord(
- new DatanodeID("xx", "yy", 44, 55), dn1, replica1);
+ new DatanodeID("xx", "yy", "zz", 1, 2, 3), dn1, replica1);
BlockRecord record2 = new BlockRecord(
- new DatanodeID("aa", "bb", 11, 22), dn2, replica2);
+ new DatanodeID("aa", "bb", "cc", 1, 2, 3), dn2, replica2);
syncList.add(record1);
syncList.add(record2);
@@ -402,7 +402,7 @@ public void testRWRReplicas() throws IOException {
private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
Collection<RecoveringBlock> blocks = new ArrayList<RecoveringBlock>(1);
DatanodeInfo mockOtherDN = new DatanodeInfo(
- new DatanodeID("127.0.0.1", "storage-1234", 0, 0));
+ new DatanodeID("127.0.0.1", "localhost", "storage-1234", 0, 0, 0));
DatanodeInfo[] locs = new DatanodeInfo[] {
new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())),
mockOtherDN };
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
index 07c2425835..78d20ad655 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
@@ -162,16 +162,16 @@ public void testBlockReplacement() throws IOException, TimeoutException {
// start to replace the block
// case 1: proxySource does not contain the block
- LOG.info("Testcase 1: Proxy " + newNode.getName()
+ LOG.info("Testcase 1: Proxy " + newNode
+ " does not contain the block " + b);
assertFalse(replaceBlock(b, source, newNode, proxies.get(0)));
// case 2: destination already contains the block
- LOG.info("Testcase 2: Destination " + proxies.get(1).getName()
+ LOG.info("Testcase 2: Destination " + proxies.get(1)
+ " contains the block " + b);
assertFalse(replaceBlock(b, source, proxies.get(0), proxies.get(1)));
// case 3: correct case
- LOG.info("Testcase 3: Source=" + source.getName() + " Proxy=" +
- proxies.get(0).getName() + " Destination=" + newNode.getName() );
+ LOG.info("Testcase 3: Source=" + source + " Proxy=" +
+ proxies.get(0) + " Destination=" + newNode );
assertTrue(replaceBlock(b, source, proxies.get(0), newNode));
// after cluster has time to resolve the over-replication,
// block locations should contain two proxies and newNode
@@ -181,7 +181,7 @@ public void testBlockReplacement() throws IOException, TimeoutException {
DEFAULT_BLOCK_SIZE, REPLICATION_FACTOR, client);
// case 4: proxies.get(0) is not a valid del hint
// expect either source or newNode replica to be deleted instead
- LOG.info("Testcase 4: invalid del hint " + proxies.get(0).getName() );
+ LOG.info("Testcase 4: invalid del hint " + proxies.get(0) );
assertTrue(replaceBlock(b, proxies.get(0), proxies.get(1), source));
// after cluster has time to resolve the over-replication,
// block locations should contain two proxies,
@@ -222,7 +222,7 @@ private void checkBlocks(DatanodeInfo[] includeNodes, String fileName,
for (DatanodeInfo node : includeNodes) {
if (!nodeLocations.contains(node) ) {
notDone=true;
- LOG.info("Block is not located at " + node.getName() );
+ LOG.info("Block is not located at " + node );
break;
}
}
@@ -231,9 +231,9 @@ private void checkBlocks(DatanodeInfo[] includeNodes, String fileName,
String expectedNodesList = "";
String currentNodesList = "";
for (DatanodeInfo dn : includeNodes)
- expectedNodesList += dn.getName() + ", ";
+ expectedNodesList += dn + ", ";
for (DatanodeInfo dn : nodes)
- currentNodesList += dn.getName() + ", ";
+ currentNodesList += dn + ", ";
LOG.info("Expected replica nodes are: " + expectedNodesList);
LOG.info("Current actual replica nodes are: " + currentNodesList);
throw new TimeoutException(
@@ -254,7 +254,7 @@ private boolean replaceBlock( ExtendedBlock block, DatanodeInfo source,
DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
Socket sock = new Socket();
sock.connect(NetUtils.createSocketAddr(
- destination.getName()), HdfsServerConstants.READ_TIMEOUT);
+ destination.getXferAddr()), HdfsServerConstants.READ_TIMEOUT);
sock.setKeepAlive(true);
// sendRequest
DataOutputStream out = new DataOutputStream(sock.getOutputStream());
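
Opening a socket to a datanode for block transfer now goes through getXferAddr(); the returned string still parses with NetUtils.createSocketAddr(), so the surrounding code is otherwise unchanged. Sketch (destination and READ_TIMEOUT as in the test above):

    // Connect to the destination DN's data-transfer endpoint.
    Socket sock = new Socket();
    sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
        HdfsServerConstants.READ_TIMEOUT);
    sock.setKeepAlive(true);
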
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
index 985900030e..f2cb248ae1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
@@ -679,8 +679,9 @@ private void startDNandWait(Path filePath, boolean waitReplicas)
assertEquals(datanodes.size(), 2);
if(LOG.isDebugEnabled()) {
+ int lastDn = datanodes.size() - 1;
LOG.debug("New datanode "
- + cluster.getDataNodes().get(datanodes.size() - 1).getMachineName()
+ + cluster.getDataNodes().get(lastDn).getDisplayName()
+ " has been started");
}
if (waitReplicas) DFSTestUtil.waitReplication(fs, filePath, REPL_FACTOR);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
index 20a16c3166..351a61cc45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
@@ -161,8 +161,8 @@ public void testFedSingleNN() throws IOException {
assertEquals("number of volumes is wrong", 2, volInfos.size());
for (BPOfferService bpos : dn.getAllBpOs()) {
- LOG.info("reg: bpid=" + "; name=" + bpos.bpRegistration.name + "; sid="
- + bpos.bpRegistration.storageID + "; nna=" +
+ LOG.info("reg: bpid=" + "; name=" + bpos.bpRegistration + "; sid="
+ + bpos.bpRegistration.getStorageID() + "; nna=" +
getNNSocketAddress(bpos));
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index e66dc56900..04aa71d8b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -270,7 +270,7 @@ private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
Socket s = null;
ExtendedBlock block = lblock.getBlock();
- targetAddr = NetUtils.createSocketAddr(datanode.getName());
+ targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
s = NetUtils.getDefaultSocketFactory(conf).createSocket();
s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
index 2ff075c8ad..080f47ca9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
@@ -183,7 +183,7 @@ public void testDfsAdminDeleteBlockPool() throws Exception {
Assert.assertEquals(1, dn1.getAllBpOs().length);
DFSAdmin admin = new DFSAdmin(nn1Conf);
- String dn1Address = dn1.getSelfAddr().getHostName()+":"+dn1.getIpcPort();
+ String dn1Address = dn1.getDatanodeId().getIpAddr() + ":" + dn1.getIpcPort();
String[] args = { "-deleteBlockPool", dn1Address, bpid2 };
int ret = admin.run(args);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
index 74434e5a44..7a83bf3408 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
@@ -136,7 +136,7 @@ public void testReplicationError() throws Exception {
DataNode datanode = cluster.getDataNodes().get(sndNode);
// replicate the block to the second datanode
- InetSocketAddress target = datanode.getSelfAddr();
+ InetSocketAddress target = datanode.getXferAddress();
Socket s = new Socket(target.getAddress(), target.getPort());
// write the header.
DataOutputStream out = new DataOutputStream(s.getOutputStream());
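
When an InetSocketAddress is wanted rather than a string, getXferAddress() returns one directly and saves the parse step shown in the previous sketch. Using the names from the hunk above:

    // getXferAddress() exposes the data-transfer endpoint as an InetSocketAddress.
    InetSocketAddress target = datanode.getXferAddress();
    Socket s = new Socket(target.getAddress(), target.getPort());
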
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
index cecf2eddbb..9f96eac70f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
@@ -348,7 +348,7 @@ public void testInterDNProtocolTimeout() throws Throwable {
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
DatanodeID fakeDnId = new DatanodeID(
- "localhost:" + addr.getPort(), "fake-storage", 0, addr.getPort());
+ "localhost", "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
InterDatanodeProtocol proxy = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 61d7516863..73884c5b1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -766,28 +766,33 @@ private static class TinyDatanode implements Comparable<String> {
long[] blockReportList;
/**
- * Get data-node in the form
- * <host name> : <port>
- * where port is a 6 digit integer.
+ * Return a 6 digit integer port.
* This is necessary in order to provide lexocographic ordering.
* Host names are all the same, the ordering goes by port numbers.
*/
- private static String getNodeName(int port) throws IOException {
- String machineName = DNS.getDefaultHost("default", "default");
- String sPort = String.valueOf(100000 + port);
- if(sPort.length() > 6)
- throw new IOException("Too many data-nodes.");
- return machineName + ":" + sPort;
+ private static int getNodePort(int num) throws IOException {
+ int port = 100000 + num;
+ if (String.valueOf(port).length() > 6) {
+ throw new IOException("Too many data-nodes");
+ }
+ return port;
}
TinyDatanode(int dnIdx, int blockCapacity) throws IOException {
- dnRegistration = new DatanodeRegistration(getNodeName(dnIdx));
+ String hostName = DNS.getDefaultHost("default", "default");
+ dnRegistration = new DatanodeRegistration(hostName);
+ dnRegistration.setXferPort(getNodePort(dnIdx));
+ dnRegistration.setHostName(hostName);
this.blocks = new ArrayList<Block>(blockCapacity);
this.nrBlocks = 0;
}
- String getName() {
- return dnRegistration.getName();
+ public String toString() {
+ return dnRegistration.toString();
+ }
+
+ String getXferAddr() {
+ return dnRegistration.getXferAddr();
}
void register() throws IOException {
@@ -850,8 +855,8 @@ long[] getBlockReportList() {
return blockReportList;
}
- public int compareTo(String name) {
- return getName().compareTo(name);
+ public int compareTo(String xferAddr) {
+ return getXferAddr().compareTo(xferAddr);
}
/**
@@ -889,10 +894,12 @@ private int transferBlocks( Block blocks[],
for(int t = 0; t < blockTargets.length; t++) {
DatanodeInfo dnInfo = blockTargets[t];
DatanodeRegistration receivedDNReg;
- receivedDNReg = new DatanodeRegistration(dnInfo.getName());
+ receivedDNReg = new DatanodeRegistration(dnInfo.getIpAddr());
receivedDNReg.setStorageInfo(
new DataStorage(nsInfo, dnInfo.getStorageID()));
+ receivedDNReg.setXferPort(dnInfo.getXferPort());
receivedDNReg.setInfoPort(dnInfo.getInfoPort());
+ receivedDNReg.setIpcPort(dnInfo.getIpcPort());
ReceivedDeletedBlockInfo[] rdBlocks = {
new ReceivedDeletedBlockInfo(
blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
@@ -977,10 +984,10 @@ void generateInputs(int[] ignore) throws IOException {
for(int idx=0; idx < nrDatanodes; idx++) {
datanodes[idx] = new TinyDatanode(idx, blocksPerReport);
datanodes[idx].register();
- assert datanodes[idx].getName().compareTo(prevDNName) > 0
+ assert datanodes[idx].getXferAddr().compareTo(prevDNName) > 0
: "Data-nodes must be sorted lexicographically.";
datanodes[idx].sendHeartbeat();
- prevDNName = datanodes[idx].getName();
+ prevDNName = datanodes[idx].getXferAddr();
}
// create files
@@ -1010,7 +1017,7 @@ private ExtendedBlock addBlocks(String fileName, String clientName)
LocatedBlock loc = nameNodeProto.addBlock(fileName, clientName, prevBlock, null);
prevBlock = loc.getBlock();
for(DatanodeInfo dnInfo : loc.getLocations()) {
- int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getName());
+ int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getXferAddr());
datanodes[dnIdx].addBlock(loc.getBlock().getLocalBlock());
ReceivedDeletedBlockInfo[] rdBlocks = { new ReceivedDeletedBlockInfo(
loc.getBlock().getLocalBlock(),
@@ -1165,9 +1172,9 @@ private void decommissionNodes() throws IOException {
for(int i=0; i < nodesToDecommission; i++) {
TinyDatanode dn = blockReportObject.datanodes[nrDatanodes-1-i];
numDecommissionedBlocks += dn.nrBlocks;
- excludeFile.write(dn.getName().getBytes());
+ excludeFile.write(dn.getXferAddr().getBytes());
excludeFile.write('\n');
- LOG.info("Datanode " + dn.getName() + " is decommissioned.");
+ LOG.info("Datanode " + dn + " is decommissioned.");
}
excludeFile.close();
nameNodeProto.refreshNodes();
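
NNThroughputBenchmark's TinyDatanode no longer mints a synthetic "host:port" name; it registers a hostname and sets the ports on the DatanodeRegistration individually, keeping six-digit xfer ports so that sorting by getXferAddr() stays lexicographic. A condensed sketch of the new construction (dnIdx is the fake datanode's index; not a complete benchmark):

    // Build a registration whose xfer port is a six-digit number so that
    // fake datanodes sort lexicographically by their transfer address.
    String hostName = DNS.getDefaultHost("default", "default");
    DatanodeRegistration dnRegistration = new DatanodeRegistration(hostName);
    dnRegistration.setXferPort(100000 + dnIdx);
    dnRegistration.setHostName(hostName);
    String sortKey = dnRegistration.getXferAddr();   // e.g. "<host>:100001" (format assumed)
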
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
index 9934a6f534..463cd952fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
@@ -156,7 +156,7 @@ private String decommissionNode(FSNamesystem namesystem,
throws IOException {
DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
- String nodename = info[nodeIndex].getName();
+ String nodename = info[nodeIndex].getXferAddr();
System.out.println("Decommissioning node: " + nodename);
// write nodename into the exclude file.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index 6647042145..97a81d3a77 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -93,6 +93,15 @@ public void testNameNodeMXBeanInfo() throws Exception {
// get attribute alivenodeinfo
String alivenodeinfo = (String) (mbs.getAttribute(mxbeanName,
"LiveNodes"));
+ Map<String, Map<String, Object>> liveNodes =
+ (Map<String, Map<String, Object>>) JSON.parse(alivenodeinfo);
+ assertTrue(liveNodes.size() > 0);
+ for (Map<String, Object> liveNode : liveNodes.values()) {
+ assertTrue(liveNode.containsKey("nonDfsUsedSpace"));
+ assertTrue(((Long)liveNode.get("nonDfsUsedSpace")) > 0);
+ assertTrue(liveNode.containsKey("capacity"));
+ assertTrue(((Long)liveNode.get("capacity")) > 0);
+ }
Assert.assertEquals(fsn.getLiveNodes(), alivenodeinfo);
// get attribute deadnodeinfo
String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java
index ddfd573b4c..d7f2ff9789 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java
@@ -167,7 +167,7 @@ public void testDatanodeRestarts() throws Exception {
// Stop the DN.
DataNode dn = cluster.getDataNodes().get(0);
- String dnName = dn.getDatanodeId().getName();
+ String dnName = dn.getDatanodeId().getXferAddr();
DataNodeProperties dnProps = cluster.stopDataNode(0);
// Make sure both NNs register it as dead.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
index 9ebc13e5ed..36b2220641 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
@@ -65,12 +65,11 @@ public class TestOfflineEditsViewer {
*
* These are the opcodes that are not used anymore, some
* are marked deprecated, we need to include them here to make
- * sure we exclude them when checking for completness of testing,
+ * sure we exclude them when checking for completeness of testing,
* that's why the "deprecation" warnings are suppressed.
*/
@SuppressWarnings("deprecation")
private static void initializeObsoleteOpCodes() {
- // these are obsolete
obsoleteOpCodes.put(FSEditLogOpCodes.OP_DATANODE_ADD, true);
obsoleteOpCodes.put(FSEditLogOpCodes.OP_DATANODE_REMOVE, true);
obsoleteOpCodes.put(FSEditLogOpCodes.OP_SET_NS_QUOTA, true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
index 5a4643f410..36c38ef4da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
@@ -30,16 +30,16 @@
public class TestNetworkTopology extends TestCase {
private final static NetworkTopology cluster = new NetworkTopology();
private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
- new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"),
- new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"),
- new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"),
- new DatanodeDescriptor(new DatanodeID("h4:5020"), "/d1/r2"),
- new DatanodeDescriptor(new DatanodeID("h5:5020"), "/d1/r2"),
- new DatanodeDescriptor(new DatanodeID("h6:5020"), "/d2/r3"),
- new DatanodeDescriptor(new DatanodeID("h7:5020"), "/d2/r3")
+ new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"),
+ new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"),
+ new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1/r2"),
+ new DatanodeDescriptor(new DatanodeID("h4", 5020), "/d1/r2"),
+ new DatanodeDescriptor(new DatanodeID("h5", 5020), "/d1/r2"),
+ new DatanodeDescriptor(new DatanodeID("h6", 5020), "/d2/r3"),
+ new DatanodeDescriptor(new DatanodeID("h7", 5020), "/d2/r3")
};
private final static DatanodeDescriptor NODE =
- new DatanodeDescriptor(new DatanodeID("h8:5020"), "/d2/r4");
+ new DatanodeDescriptor(new DatanodeID("h8", 5020), "/d2/r4");
static {
for(int i=0; i map = new TreeMap();
+ if (dfs != null) {
+ map.put("namenode_port", dfs.getNameNodePort());
+ }
+
+ FileWriter fw = new FileWriter(new File(writeDetails));
+ fw.write(new JSON().toJSON(map));
+ fw.close();
+ }
+ }
+
+ /**
+ * Parses arguments and fills out the member variables.
+ * @param args Command-line arguments.
+ * @return true on successful parse; false to indicate that the
+ * program should exit.
+ */
+ private boolean parseArguments(String[] args) {
+ Options options = makeOptions();
+ CommandLine cli;
+ try {
+ CommandLineParser parser = new GnuParser();
+ cli = parser.parse(options, args);
+ } catch(ParseException e) {
+ LOG.warn("options parsing failed: "+e.getMessage());
+ new HelpFormatter().printHelp("...", options);
+ return false;
+ }
+
+ if (cli.hasOption("help")) {
+ new HelpFormatter().printHelp("...", options);
+ return false;
+ }
+
+ if (cli.getArgs().length > 0) {
+ for (String arg : cli.getArgs()) {
+ LOG.error("Unrecognized option: " + arg);
+ new HelpFormatter().printHelp("...", options);
+ return false;
+ }
+ }
+
+ // HDFS
+ numDataNodes = intArgument(cli, "datanodes", 1);
+ nameNodePort = intArgument(cli, "nnport", 0);
+ dfsOpts = cli.hasOption("format") ?
+ StartupOption.FORMAT : StartupOption.REGULAR;
+
+ // Runner
+ writeDetails = cli.getOptionValue("writeDetails");
+ writeConfig = cli.getOptionValue("writeConfig");
+
+ // General
+ conf = new HdfsConfiguration();
+ updateConfiguration(conf, cli.getOptionValues("D"));
+
+ return true;
+ }
+
+ /**
+ * Updates configuration based on what's given on the command line.
+ *
+ * @param conf2 The configuration object
+ * @param keyvalues An array of interleaved key value pairs.
+ */
+ private void updateConfiguration(Configuration conf2, String[] keyvalues) {
+ int num_confs_updated = 0;
+ if (keyvalues != null) {
+ for (String prop : keyvalues) {
+ String[] keyval = prop.split("=", 2);
+ if (keyval.length == 2) {
+ conf2.set(keyval[0], keyval[1]);
+ num_confs_updated++;
+ } else {
+ LOG.warn("Ignoring -D option " + prop);
+ }
+ }
+ }
+ LOG.info("Updated " + num_confs_updated +
+ " configuration settings from command line.");
+ }
+
+ /**
+ * Extracts an integer argument with specified default value.
+ */
+ private int intArgument(CommandLine cli, String argName, int defaultValue) {
+ String o = cli.getOptionValue(argName);
+ try {
+ if (o != null) {
+ return Integer.parseInt(o);
+ }
+ } catch (NumberFormatException ex) {
+ LOG.error("Couldn't parse value (" + o + ") for option "
+ + argName + ". Using default: " + defaultValue);
+ }
+
+ return defaultValue;
+ }
+
+ /**
+ * Starts a MiniDFSClusterManager with parameters drawn from the command line.
+ */
+ public static void main(String[] args) throws IOException {
+ new MiniDFSClusterManager().run(args);
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-14-dfs-dir.tgz b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-14-dfs-dir.tgz
deleted file mode 100644
index 4d571a5951..0000000000
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-14-dfs-dir.tgz and /dev/null differ
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-dfs-dir.txt b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-dfs-dir.txt
index 3d1b67dae3..0fbc3189aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-dfs-dir.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-dfs-dir.txt
@@ -19,18 +19,6 @@
# See HADOOP-1629 for more info if needed.
# These two files are used by unit test TestDFSUpgradeFromImage.java
#
-# hadoop-14-dfs-dir.tgz :
-# ---------------------
-# This file contains the HDFS directory structure for one namenode and 4 datanodes.
-# The structure is setup similar to the structure used in MiniDFSCluster.
-# The directory was created with Hadoo-0.14.x.
-#
-# In the test, this directory is unpacked and MiniDFSCluster is run with
-# "-upgrade" option. The test waits for the upgrade to complete
-# (leave safe mode) and then all the files are read. The test checks that the
-# directory structure and file checksums exactly match the information
-# in this file.
-#
# hadoop-dfs-dir.txt :
# ---------------------
# Along with this description this file contains the expected files and
@@ -43,9 +31,6 @@
# For e.g. "top-dir-1Mb-512" contains files created with dfs.blocksize of 1Mb
# and io.bytes.per.checksum of 512.
#
-# In the future, when Hadoop project no longer supports upgrade from
-# Hadoop-0.12, then a new DFS directory image must be created.
-#
# To generate checksum info for new files :
# ---------------------------------------
# Uncomment the last coment (starts with "printChecksums") and run the
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
index 7f9432a5f8..8f769cec38 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
@@ -15420,7 +15420,7 @@
SubstringComparator
- setSpaceQuota: java.io.FileNotFoundException: Directory does not exist: /test1
+ setSpaceQuota: Directory does not exist: /test1
@@ -15486,7 +15486,7 @@
SubstringComparator
- clrQuota: java.io.FileNotFoundException: Directory does not exist: /test1
+ clrQuota: Directory does not exist: /test1
@@ -15506,7 +15506,7 @@
RegexpComparator
- put: org.apache.hadoop.hdfs.protocol.DSQuotaExceededException: The DiskSpace quota of /dir1 is exceeded: quota=1.0k diskspace consumed=[0-9.]+[kmg]*
+ put: The DiskSpace quota of /dir1 is exceeded: quota=1.0k diskspace consumed=[0-9.]+[kmg]*
@@ -15526,7 +15526,7 @@
SubstringComparator
- mkdir: org.apache.hadoop.hdfs.protocol.NSQuotaExceededException: The NameSpace quota (directories and files) of directory /dir1 is exceeded: quota=1 file count=2
+ mkdir: The NameSpace quota (directories and files) of directory /dir1 is exceeded: quota=1 file count=2
@@ -15739,6 +15739,10 @@
RegexpComparator
Name: [0-9\.:]+ \([-.a-zA-z0-9\.]+\)
+
+ RegexpComparator
+ Hostname: [-.a-zA-z0-9\.]+
+
RegexpComparator
Decommission Status : [a-zA-Z]+
@@ -15792,7 +15796,7 @@
TokenComparator
- saveNamespace: java.io.IOException: Safe mode should be turned ON in order to create namespace image.
+ saveNamespace: Safe mode should be turned ON in order to create namespace image.
@@ -15836,6 +15840,10 @@
<type>RegexpComparator</type>
<expected-output>Name: [0-9\.:]+ \([-.a-zA-z0-9\.]+\)</expected-output>
</comparator>
+ <comparator>
+ <type>RegexpComparator</type>
+ <expected-output>Hostname: [-.a-zA-z0-9\.]+</expected-output>
+ </comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>Decommission Status : [a-zA-Z]+</expected-output>
@@ -16168,7 +16176,7 @@
<type>SubstringComparator</type>
- <expected-output>setQuota: org.apache.hadoop.hdfs.server.namenode.SafeModeException: Cannot set quota on /test. Name node is in safe mode.</expected-output>
+ <expected-output>setQuota: Cannot set quota on /test. Name node is in safe mode.</expected-output>
@@ -16187,7 +16195,7 @@
<type>SubstringComparator</type>
- <expected-output>clrQuota: org.apache.hadoop.hdfs.server.namenode.SafeModeException: Cannot set quota on /test. Name node is in safe mode.</expected-output>
+ <expected-output>clrQuota: Cannot set quota on /test. Name node is in safe mode.</expected-output>
@@ -16207,7 +16215,7 @@
<type>SubstringComparator</type>
- <expected-output>setSpaceQuota: org.apache.hadoop.hdfs.server.namenode.SafeModeException: Cannot set quota on /test. Name node is in safe mode.</expected-output>
+ <expected-output>setSpaceQuota: Cannot set quota on /test. Name node is in safe mode.</expected-output>
@@ -16226,7 +16234,7 @@
<type>SubstringComparator</type>
- <expected-output>clrSpaceQuota: org.apache.hadoop.hdfs.server.namenode.SafeModeException: Cannot set quota on /test. Name node is in safe mode.</expected-output>
+ <expected-output>clrSpaceQuota: Cannot set quota on /test. Name node is in safe mode.</expected-output>
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index d0c538e888..a28d38f2bb 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -52,6 +52,9 @@ Trunk (unreleased changes)
BUG FIXES
+ MAPREDUCE-4087. [Gridmix] GenerateDistCacheData job of Gridmix can
+ become slow in some cases (ravigummadi).
+
MAPREDUCE-3953. [Gridmix] Gridmix throws NPE and does not simulate a
job if the trace contains null taskStatus for a task.
(ravigummadi)
@@ -133,6 +136,9 @@ Release 2.0.0 - UNRELEASED
MAPREDUCE-3353. Add a channel between RM and AM to get information on
nodes. (Bikas Saha via acmurthy)
+ MAPREDUCE-3955. Change MR to use ProtobufRpcEngine from hadoop-common
+ instead of ProtoOverHadoopRpcEngine. (Jitendra Nath Pandey via sseth)
+
OPTIMIZATIONS
BUG FIXES
@@ -176,6 +182,19 @@ Release 2.0.0 - UNRELEASED
MAPREDUCE-4066. Use default value when fetching MR_AM_STAGING_DIR
(xieguiming via harsh)
+ MAPREDUCE-3377. Added a unit test to ensure OutputCommitter.checkOutputSpecs
+ is called prior to copying job.xml. (Jane Chen via acmurthy)
+
+ MAPREDUCE-4081. TestMROutputFormat.java does not compile (Jason Lowe via
+ bobby)
+
+ MAPREDUCE-4082. hadoop-mapreduce-client-app's mrapp-generated-classpath
+ file should not be in the module JAR (tucu)
+
+ MAPREDUCE-3916. various issues with running yarn proxyserver (devaraj via tucu)
+
+ MAPREDUCE-4091. tools testcases failing because of MAPREDUCE-4082 (tucu)
+
Release 0.23.2 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -219,6 +238,9 @@ Release 0.23.2 - UNRELEASED
MAPREDUCE-4043. Secret keys set in Credentials are not seen by tasks
(Jason Lowe via bobby)
+ MAPREDUCE-3989. Cap space usage of default log4j rolling policy.
+ (Patrick Hunt via eli)
+
OPTIMIZATIONS
MAPREDUCE-3901. Modified JobHistory records in YARN to lazily load job and
diff --git a/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh b/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh
index 2272ae9564..4cd6eb1ec8 100644
--- a/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh
+++ b/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh
@@ -90,7 +90,7 @@ fi
# some variables
export HADOOP_LOGFILE=yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.log
-export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-INFO,DRFA}
+export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-INFO,RFA}
export HADOOP_JHS_LOGGER=${HADOOP_JHS_LOGGER:-INFO,JSA}
log=$YARN_LOG_DIR/yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.out
pid=$YARN_PID_DIR/yarn-$YARN_IDENT_STRING-$command.pid
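Editor's note (not part of the patch): the daemon scripts in this change switch the default root logger appender from DRFA (DailyRollingFileAppender, which rolls by date and never prunes old files) to RFA (RollingFileAppender, which rolls by size and keeps a bounded number of backups), in line with the MAPREDUCE-3989 entry above about capping log space. A minimal log4j 1.2 sketch of the difference; file names and limits are illustrative, not Hadoop's shipped defaults.

import org.apache.log4j.DailyRollingFileAppender;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.RollingFileAppender;

public class RollingPolicySketch {
  public static void main(String[] args) throws Exception {
    PatternLayout layout = new PatternLayout("%d{ISO8601} %p %c: %m%n");

    // DRFA: one new file per day, old files are never deleted, so disk usage is unbounded.
    DailyRollingFileAppender drfa =
        new DailyRollingFileAppender(layout, "daemon-drfa.log", "'.'yyyy-MM-dd");

    // RFA: rolls when the file hits a size limit and keeps a fixed number of backups,
    // so total usage is capped at roughly maxFileSize * (maxBackupIndex + 1).
    RollingFileAppender rfa = new RollingFileAppender(layout, "daemon-rfa.log");
    rfa.setMaxFileSize("256MB");   // illustrative value
    rfa.setMaxBackupIndex(20);     // illustrative value

    drfa.close();
    rfa.close();
  }
}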
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
index 2059d28038..b63d181b6a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
@@ -86,6 +86,11 @@
test-compile
+
+
+ mrapp-generated-classpath
+
+
maven-dependency-plugin
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRClientSecurityInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRClientSecurityInfo.java
index b94e4122a8..2b8efea9e5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRClientSecurityInfo.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRClientSecurityInfo.java
@@ -21,12 +21,12 @@
import java.lang.annotation.Annotation;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.SecurityInfo;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.security.token.TokenSelector;
-import org.apache.hadoop.yarn.proto.MRClientProtocol;
import org.apache.hadoop.yarn.security.client.ClientTokenSelector;
public class MRClientSecurityInfo extends SecurityInfo {
@@ -38,7 +38,7 @@ public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
@Override
public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
- if (!protocol.equals(MRClientProtocol.MRClientProtocolService.BlockingInterface.class)) {
+ if (!protocol.equals(MRClientProtocolPB.class)) {
return null;
}
return new TokenInfo() {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/ClientHSPolicyProvider.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/ClientHSPolicyProvider.java
index 968d0423a7..dfb7469490 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/ClientHSPolicyProvider.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/ClientHSPolicyProvider.java
@@ -19,10 +19,10 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.mapreduce.v2.api.HSClientProtocolPB;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;
-import org.apache.hadoop.yarn.proto.HSClientProtocol;
/**
* {@link PolicyProvider} for YARN MapReduce protocols.
@@ -35,7 +35,7 @@ public class ClientHSPolicyProvider extends PolicyProvider {
new Service[] {
new Service(
JHAdminConfig.MR_HS_SECURITY_SERVICE_AUTHORIZATION,
- HSClientProtocol.HSClientProtocolService.BlockingInterface.class)
+ HSClientProtocolPB.class)
};
@Override
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/MRAMPolicyProvider.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/MRAMPolicyProvider.java
index 3f6ecb4386..24d0c2f7f9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/MRAMPolicyProvider.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/MRAMPolicyProvider.java
@@ -21,9 +21,9 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;
-import org.apache.hadoop.yarn.proto.MRClientProtocol;
/**
* {@link PolicyProvider} for YARN MapReduce protocols.
@@ -39,7 +39,7 @@ public class MRAMPolicyProvider extends PolicyProvider {
TaskUmbilicalProtocol.class),
new Service(
MRJobConfig.MR_AM_SECURITY_SERVICE_AUTHORIZATION_CLIENT,
- MRClientProtocol.MRClientProtocolService.BlockingInterface.class)
+ MRClientProtocolPB.class)
};
@Override
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/HSClientProtocolPB.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/HSClientProtocolPB.java
new file mode 100644
index 0000000000..8c9a007b9b
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/HSClientProtocolPB.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.v2.api;
+
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.yarn.proto.HSClientProtocol.HSClientProtocolService;
+
+@ProtocolInfo(protocolName = "org.apache.hadoop.mapreduce.v2.api.HSClientProtocolPB",
+ protocolVersion = 1)
+public interface HSClientProtocolPB extends HSClientProtocolService.BlockingInterface {
+
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocolPB.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocolPB.java
new file mode 100644
index 0000000000..835a161bec
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocolPB.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.v2.api;
+
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.yarn.proto.MRClientProtocol.MRClientProtocolService;
+
+@ProtocolInfo(
+ protocolName = "org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB",
+ protocolVersion = 1)
+public interface MRClientProtocolPB extends MRClientProtocolService.BlockingInterface {
+
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/HSClientProtocolPBClientImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/HSClientProtocolPBClientImpl.java
index aa5d40e8e7..156930325b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/HSClientProtocolPBClientImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/HSClientProtocolPBClientImpl.java
@@ -22,10 +22,10 @@
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol;
-import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine;
-import org.apache.hadoop.yarn.proto.HSClientProtocol.HSClientProtocolService;
+import org.apache.hadoop.mapreduce.v2.api.HSClientProtocolPB;
public class HSClientProtocolPBClientImpl extends MRClientProtocolPBClientImpl
implements HSClientProtocol {
@@ -33,9 +33,9 @@ public class HSClientProtocolPBClientImpl extends MRClientProtocolPBClientImpl
public HSClientProtocolPBClientImpl(long clientVersion,
InetSocketAddress addr, Configuration conf) throws IOException {
super();
- RPC.setProtocolEngine(conf, HSClientProtocolService.BlockingInterface.class,
- ProtoOverHadoopRpcEngine.class);
- proxy = (HSClientProtocolService.BlockingInterface)RPC.getProxy(
- HSClientProtocolService.BlockingInterface.class, clientVersion, addr, conf);
+ RPC.setProtocolEngine(conf, HSClientProtocolPB.class,
+ ProtobufRpcEngine.class);
+ proxy = (HSClientProtocolPB)RPC.getProxy(
+ HSClientProtocolPB.class, clientVersion, addr, conf);
}
}
\ No newline at end of file
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java
index 1fb57f972c..cf14532902 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java
@@ -23,8 +23,10 @@
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
+import org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
@@ -86,21 +88,20 @@
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine;
-import org.apache.hadoop.yarn.proto.MRClientProtocol.MRClientProtocolService;
+import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl;
import com.google.protobuf.ServiceException;
public class MRClientProtocolPBClientImpl implements MRClientProtocol {
- protected MRClientProtocolService.BlockingInterface proxy;
+ protected MRClientProtocolPB proxy;
public MRClientProtocolPBClientImpl() {};
public MRClientProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException {
- RPC.setProtocolEngine(conf, MRClientProtocolService.BlockingInterface.class, ProtoOverHadoopRpcEngine.class);
- proxy = (MRClientProtocolService.BlockingInterface)RPC.getProxy(
- MRClientProtocolService.BlockingInterface.class, clientVersion, addr, conf);
+ RPC.setProtocolEngine(conf, MRClientProtocolPB.class, ProtobufRpcEngine.class);
+ proxy = (MRClientProtocolPB)RPC.getProxy(
+ MRClientProtocolPB.class, clientVersion, addr, conf);
}
@Override
@@ -110,13 +111,7 @@ public GetJobReportResponse getJobReport(GetJobReportRequest request)
try {
return new GetJobReportResponsePBImpl(proxy.getJobReport(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -127,13 +122,7 @@ public GetTaskReportResponse getTaskReport(GetTaskReportRequest request)
try {
return new GetTaskReportResponsePBImpl(proxy.getTaskReport(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -144,13 +133,7 @@ public GetTaskAttemptReportResponse getTaskAttemptReport(
try {
return new GetTaskAttemptReportResponsePBImpl(proxy.getTaskAttemptReport(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -161,13 +144,7 @@ public GetCountersResponse getCounters(GetCountersRequest request)
try {
return new GetCountersResponsePBImpl(proxy.getCounters(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -178,13 +155,7 @@ public GetTaskAttemptCompletionEventsResponse getTaskAttemptCompletionEvents(
try {
return new GetTaskAttemptCompletionEventsResponsePBImpl(proxy.getTaskAttemptCompletionEvents(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -195,13 +166,7 @@ public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
try {
return new GetTaskReportsResponsePBImpl(proxy.getTaskReports(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -212,13 +177,7 @@ public GetDiagnosticsResponse getDiagnostics(GetDiagnosticsRequest request)
try {
return new GetDiagnosticsResponsePBImpl(proxy.getDiagnostics(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -231,13 +190,7 @@ public GetDelegationTokenResponse getDelegationToken(
return new GetDelegationTokenResponsePBImpl(proxy.getDelegationToken(
null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -248,13 +201,7 @@ public KillJobResponse killJob(KillJobRequest request)
try {
return new KillJobResponsePBImpl(proxy.killJob(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -265,13 +212,7 @@ public KillTaskResponse killTask(KillTaskRequest request)
try {
return new KillTaskResponsePBImpl(proxy.killTask(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -282,13 +223,7 @@ public KillTaskAttemptResponse killTaskAttempt(KillTaskAttemptRequest request)
try {
return new KillTaskAttemptResponsePBImpl(proxy.killTaskAttempt(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -299,13 +234,7 @@ public FailTaskAttemptResponse failTaskAttempt(FailTaskAttemptRequest request)
try {
return new FailTaskAttemptResponsePBImpl(proxy.failTaskAttempt(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
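Editor's note (not part of the patch): every *PBClientImpl touched above follows the same shape after this change: register ProtobufRpcEngine for the @ProtocolInfo-annotated PB interface, obtain a proxy typed to that interface, and translate ServiceException through the new helper. A minimal sketch of the proxy-creation half, using the MRClientProtocolPB type from this patch; the wrapper class name is illustrative.

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB;

public class PBClientWiringSketch {
  // Mirrors the constructor bodies above: bind the protobuf engine to the PB
  // interface, then ask the RPC layer for a proxy typed to that interface.
  public static MRClientProtocolPB createProxy(Configuration conf,
      InetSocketAddress addr, long clientVersion) throws IOException {
    RPC.setProtocolEngine(conf, MRClientProtocolPB.class, ProtobufRpcEngine.class);
    return (MRClientProtocolPB) RPC.getProxy(MRClientProtocolPB.class,
        clientVersion, addr, conf);
  }
}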
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/HSClientProtocolPBServiceImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/HSClientProtocolPBServiceImpl.java
index cc9fd81278..4c4882a6f2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/HSClientProtocolPBServiceImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/HSClientProtocolPBServiceImpl.java
@@ -19,10 +19,10 @@
package org.apache.hadoop.mapreduce.v2.api.impl.pb.service;
import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol;
-import org.apache.hadoop.yarn.proto.HSClientProtocol.HSClientProtocolService.BlockingInterface;
+import org.apache.hadoop.mapreduce.v2.api.HSClientProtocolPB;
public class HSClientProtocolPBServiceImpl extends MRClientProtocolPBServiceImpl
- implements BlockingInterface {
+ implements HSClientProtocolPB {
public HSClientProtocolPBServiceImpl(HSClientProtocol impl) {
super(impl);
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java
index 17725a7c40..90881215fa 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.mapreduce.v2.api.impl.pb.service;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
+import org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
@@ -91,12 +92,11 @@
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskResponseProto;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.proto.MRClientProtocol.MRClientProtocolService.BlockingInterface;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
-public class MRClientProtocolPBServiceImpl implements BlockingInterface {
+public class MRClientProtocolPBServiceImpl implements MRClientProtocolPB {
private MRClientProtocol real;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/client/ClientHSSecurityInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/client/ClientHSSecurityInfo.java
index 187bab06cb..43fc815232 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/client/ClientHSSecurityInfo.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/client/ClientHSSecurityInfo.java
@@ -21,20 +21,20 @@
import java.lang.annotation.Annotation;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.v2.api.HSClientProtocolPB;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.SecurityInfo;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.security.token.TokenSelector;
-import org.apache.hadoop.yarn.proto.HSClientProtocol;
public class ClientHSSecurityInfo extends SecurityInfo {
@Override
public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
if (!protocol
- .equals(HSClientProtocol.HSClientProtocolService.BlockingInterface.class)) {
+ .equals(HSClientProtocolPB.class)) {
return null;
}
return new KerberosInfo() {
@@ -59,7 +59,7 @@ public String clientPrincipal() {
@Override
public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
if (!protocol
- .equals(HSClientProtocol.HSClientProtocolService.BlockingInterface.class)) {
+ .equals(HSClientProtocolPB.class)) {
return null;
}
return new TokenInfo() {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/yarn/proto/HSClientProtocol.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/yarn/proto/HSClientProtocol.java
index 574836c4e4..c0f3a0aa8a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/yarn/proto/HSClientProtocol.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/yarn/proto/HSClientProtocol.java
@@ -18,14 +18,23 @@
package org.apache.hadoop.yarn.proto;
+import org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB;
+import org.apache.hadoop.yarn.proto.MRClientProtocol.MRClientProtocolService;
+
/**
* Fake protocol to differentiate the blocking interfaces in the
* security info class loaders.
*/
public interface HSClientProtocol {
public abstract class HSClientProtocolService {
- public interface BlockingInterface extends
- MRClientProtocol.MRClientProtocolService.BlockingInterface {
+ public interface BlockingInterface extends MRClientProtocolPB {
+ }
+
+ public static com.google.protobuf.BlockingService newReflectiveBlockingService(
+ final HSClientProtocolService.BlockingInterface impl) {
+ // The cast is safe
+ return MRClientProtocolService
+ .newReflectiveBlockingService((MRClientProtocolService.BlockingInterface) impl);
}
}
}
\ No newline at end of file
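Editor's note (not part of the patch): the delegating newReflectiveBlockingService added above lets server-side code build a protobuf BlockingService from an HSClientProtocolPB implementation without referencing MRClientProtocolService directly; the cast inside the factory is safe because HSClientProtocolService.BlockingInterface extends MRClientProtocolPB, which in turn extends the MR blocking interface. A minimal sketch of that use, with an illustrative wrapper class.

import org.apache.hadoop.mapreduce.v2.api.impl.pb.service.HSClientProtocolPBServiceImpl;
import org.apache.hadoop.yarn.proto.HSClientProtocol.HSClientProtocolService;

import com.google.protobuf.BlockingService;

public class HSBlockingServiceSketch {
  // HSClientProtocolPBServiceImpl implements HSClientProtocolPB, whose parent type is
  // HSClientProtocolService.BlockingInterface, so it can be passed straight to the factory.
  public static BlockingService wrap(HSClientProtocolPBServiceImpl impl) {
    return HSClientProtocolService.newReflectiveBlockingService(impl);
  }
}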
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
index b9a76b44c2..5c7b55270e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
@@ -36,6 +36,7 @@
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
@@ -96,7 +97,7 @@ public class HistoryClientService extends AbstractService {
private static final Log LOG = LogFactory.getLog(HistoryClientService.class);
- private MRClientProtocol protocolHandler;
+ private HSClientProtocol protocolHandler;
private Server server;
private WebApp webApp;
private InetSocketAddress bindAddress;
@@ -107,7 +108,7 @@ public HistoryClientService(HistoryContext history,
JHSDelegationTokenSecretManager jhsDTSecretManager) {
super("HistoryClientService");
this.history = history;
- this.protocolHandler = new MRClientProtocolHandler();
+ this.protocolHandler = new HSClientProtocolHandler();
this.jhsDTSecretManager = jhsDTSecretManager;
}
@@ -128,7 +129,7 @@ public void start() {
}
server =
- rpc.getServer(MRClientProtocol.class, protocolHandler, address,
+ rpc.getServer(HSClientProtocol.class, protocolHandler, address,
conf, jhsDTSecretManager,
conf.getInt(JHAdminConfig.MR_HISTORY_CLIENT_THREAD_COUNT,
JHAdminConfig.DEFAULT_MR_HISTORY_CLIENT_THREAD_COUNT));
@@ -177,7 +178,7 @@ public InetSocketAddress getBindAddress() {
return this.bindAddress;
}
- private class MRClientProtocolHandler implements MRClientProtocol {
+ private class HSClientProtocolHandler implements HSClientProtocol {
private RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
index 769ae9e89b..b51166a11c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
@@ -34,6 +34,7 @@
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
@@ -340,9 +341,10 @@ public GetDelegationTokenResponse getDelegationToken(
}
}
- class HistoryService extends AMService {
+ class HistoryService extends AMService implements HSClientProtocol {
public HistoryService() {
super(HSHOSTADDRESS);
+ this.protocol = HSClientProtocol.class;
}
@Override
@@ -357,6 +359,7 @@ public GetCountersResponse getCounters(GetCountersRequest request) throws YarnRe
class AMService extends AbstractService
implements MRClientProtocol {
+ protected Class<?> protocol;
private InetSocketAddress bindAddress;
private Server server;
private final String hostAddress;
@@ -367,6 +370,7 @@ public AMService() {
public AMService(String hostAddress) {
super("AMService");
+ this.protocol = MRClientProtocol.class;
this.hostAddress = hostAddress;
}
@@ -383,7 +387,7 @@ public void start(Configuration conf) {
}
server =
- rpc.getServer(MRClientProtocol.class, this, address,
+ rpc.getServer(protocol, this, address,
conf, null, 1);
server.start();
this.bindAddress =
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMROutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMROutputFormat.java
new file mode 100644
index 0000000000..88d118803d
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMROutputFormat.java
@@ -0,0 +1,206 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.mapred.JobConf;
+import org.junit.Test;
+
+import static org.junit.Assert.assertTrue;
+
+public class TestMROutputFormat {
+
+ @Test
+ public void testJobSubmission() throws Exception {
+ JobConf conf = new JobConf();
+ Job job = new Job(conf);
+ job.setInputFormatClass(TestInputFormat.class);
+ job.setMapperClass(TestMapper.class);
+ job.setOutputFormatClass(TestOutputFormat.class);
+ job.setOutputKeyClass(IntWritable.class);
+ job.setOutputValueClass(IntWritable.class);
+ job.waitForCompletion(true);
+ assertTrue(job.isSuccessful());
+ }
+
+ public static class TestMapper
+ extends Mapper<IntWritable, IntWritable, IntWritable, IntWritable> {
+ public void map(IntWritable key, IntWritable value, Context context)
+ throws IOException, InterruptedException {
+ context.write(key, value);
+ }
+ }
+}
+
+class TestInputFormat extends InputFormat<IntWritable, IntWritable> {
+
+ @Override
+ public RecordReader<IntWritable, IntWritable> createRecordReader(
+ InputSplit split, TaskAttemptContext context) throws IOException,
+ InterruptedException {
+ return new RecordReader<IntWritable, IntWritable>() {
+
+ private boolean done = false;
+
+ @Override
+ public void close() throws IOException {
+ }
+
+ @Override
+ public IntWritable getCurrentKey() throws IOException,
+ InterruptedException {
+ return new IntWritable(0);
+ }
+
+ @Override
+ public IntWritable getCurrentValue() throws IOException,
+ InterruptedException {
+ return new IntWritable(0);
+ }
+
+ @Override
+ public float getProgress() throws IOException, InterruptedException {
+ return done ? 0 : 1;
+ }
+
+ @Override
+ public void initialize(InputSplit split, TaskAttemptContext context)
+ throws IOException, InterruptedException {
+ }
+
+ @Override
+ public boolean nextKeyValue() throws IOException, InterruptedException {
+ if (!done) {
+ done = true;
+ return true;
+ }
+ return false;
+ }
+ };
+ }
+
+ @Override
+ public List<InputSplit> getSplits(JobContext context) throws IOException,
+ InterruptedException {
+ List<InputSplit> list = new ArrayList<InputSplit>();
+ list.add(new TestInputSplit());
+ return list;
+ }
+}
+
+class TestInputSplit extends InputSplit implements Writable {
+
+ @Override
+ public long getLength() throws IOException, InterruptedException {
+ return 1;
+ }
+
+ @Override
+ public String[] getLocations() throws IOException, InterruptedException {
+ String[] hosts = {"localhost"};
+ return hosts;
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ }
+}
+
+class TestOutputFormat extends OutputFormat<IntWritable, IntWritable>
+implements Configurable {
+
+ public static final String TEST_CONFIG_NAME = "mapred.test.jobsubmission";
+ private Configuration conf;
+
+ @Override
+ public void checkOutputSpecs(JobContext context) throws IOException,
+ InterruptedException {
+ conf.setBoolean(TEST_CONFIG_NAME, true);
+ }
+
+ @Override
+ public OutputCommitter getOutputCommitter(TaskAttemptContext context)
+ throws IOException, InterruptedException {
+ return new OutputCommitter() {
+
+ @Override
+ public void abortTask(TaskAttemptContext taskContext) throws IOException {
+ }
+
+ @Override
+ public void commitTask(TaskAttemptContext taskContext) throws IOException {
+ }
+
+ @Override
+ public boolean needsTaskCommit(TaskAttemptContext taskContext)
+ throws IOException {
+ return false;
+ }
+
+ @Override
+ public void setupJob(JobContext jobContext) throws IOException {
+ }
+
+ @Override
+ public void setupTask(TaskAttemptContext taskContext) throws IOException {
+ }
+ };
+ }
+
+ @Override
+ public RecordWriter<IntWritable, IntWritable> getRecordWriter(
+ TaskAttemptContext context) throws IOException, InterruptedException {
+ assertTrue(context.getConfiguration().getBoolean(TEST_CONFIG_NAME, false));
+ return new RecordWriter<IntWritable, IntWritable>() {
+
+ @Override
+ public void close(TaskAttemptContext context) throws IOException,
+ InterruptedException {
+ }
+
+ @Override
+ public void write(IntWritable key, IntWritable value) throws IOException,
+ InterruptedException {
+ }
+ };
+ }
+
+ @Override
+ public Configuration getConf() {
+ return conf;
+ }
+
+ @Override
+ public void setConf(Configuration conf) {
+ this.conf = conf;
+ }
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java
index d65a198c20..0808eed922 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java
@@ -35,7 +35,7 @@
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TypeConverter;
-import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
+import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
@@ -142,7 +142,7 @@ public void testJobHistoryData() throws IOException, InterruptedException,
LOG.info("CounterMR " + counterMR);
Assert.assertEquals(counterHS, counterMR);
- MRClientProtocol historyClient = instantiateHistoryProxy();
+ HSClientProtocol historyClient = instantiateHistoryProxy();
GetJobReportRequest gjReq = Records.newRecord(GetJobReportRequest.class);
gjReq.setJobId(jobId);
JobReport jobReport = historyClient.getJobReport(gjReq).getJobReport();
@@ -164,12 +164,12 @@ private void verifyJobReport(JobReport jobReport, JobId jobId) {
&& jobReport.getFinishTime() >= jobReport.getStartTime());
}
- private MRClientProtocol instantiateHistoryProxy() {
+ private HSClientProtocol instantiateHistoryProxy() {
final String serviceAddr =
mrCluster.getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS);
final YarnRPC rpc = YarnRPC.create(conf);
- MRClientProtocol historyClient =
- (MRClientProtocol) rpc.getProxy(MRClientProtocol.class,
+ HSClientProtocol historyClient =
+ (HSClientProtocol) rpc.getProxy(HSClientProtocol.class,
NetUtils.createSocketAddr(serviceAddr), mrCluster.getConfig());
return historyClient;
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-daemon.sh b/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-daemon.sh
index c36e99cac7..89ae9d87be 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-daemon.sh
+++ b/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-daemon.sh
@@ -90,7 +90,7 @@ fi
# some variables
export YARN_LOGFILE=yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.log
-export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,DRFA}
+export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,RFA}
log=$YARN_LOG_DIR/yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.out
pid=$YARN_PID_DIR/yarn-$YARN_IDENT_STRING-$command.pid
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/AMRMProtocolPB.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/AMRMProtocolPB.java
new file mode 100644
index 0000000000..d8f0ab7797
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/AMRMProtocolPB.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api;
+
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.yarn.proto.AMRMProtocol.AMRMProtocolService;
+
+@ProtocolInfo(protocolName = "org.apache.hadoop.yarn.api.AMRMProtocolPB",
+ protocolVersion = 1)
+public interface AMRMProtocolPB extends AMRMProtocolService.BlockingInterface {
+
+}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocolPB.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocolPB.java
new file mode 100644
index 0000000000..d5930873ef
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocolPB.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api;
+
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.yarn.proto.ClientRMProtocol.ClientRMProtocolService;
+
+@ProtocolInfo(protocolName = "org.apache.hadoop.yarn.api.ClientRMProtocolPB",
+ protocolVersion = 1)
+public interface ClientRMProtocolPB extends ClientRMProtocolService.BlockingInterface {
+
+}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManagerPB.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManagerPB.java
new file mode 100644
index 0000000000..306eaf637e
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManagerPB.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api;
+
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.yarn.proto.ContainerManager.ContainerManagerService;
+
+@ProtocolInfo(
+ protocolName = "org.apache.hadoop.yarn.api.ContainerManagerPB",
+ protocolVersion = 1)
+public interface ContainerManagerPB extends ContainerManagerService.BlockingInterface {
+
+}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/impl/pb/YarnRemoteExceptionPBImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/impl/pb/YarnRemoteExceptionPBImpl.java
index 615b072f25..ae17ed0f8e 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/impl/pb/YarnRemoteExceptionPBImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/impl/pb/YarnRemoteExceptionPBImpl.java
@@ -18,12 +18,16 @@
package org.apache.hadoop.yarn.exceptions.impl.pb;
+import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
+import java.lang.reflect.UndeclaredThrowableException;
+import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.proto.YarnProtos.YarnRemoteExceptionProto;
import org.apache.hadoop.yarn.proto.YarnProtos.YarnRemoteExceptionProtoOrBuilder;
+import com.google.protobuf.ServiceException;
public class YarnRemoteExceptionPBImpl extends YarnRemoteException {
@@ -105,4 +109,30 @@ private void maybeInitBuilder() {
}
viaProto = false;
}
+
+ /**
+ * Utility method that unwraps and throws appropriate exception.
+ * @param se ServiceException
+ * @throws YarnRemoteException
+ * @throws UndeclaredThrowableException
+ */
+ public static YarnRemoteException unwrapAndThrowException(ServiceException se)
+ throws UndeclaredThrowableException {
+ if (se.getCause() instanceof RemoteException) {
+ try {
+ throw ((RemoteException) se.getCause())
+ .unwrapRemoteException(YarnRemoteExceptionPBImpl.class);
+ } catch (YarnRemoteException ex) {
+ return ex;
+ } catch (IOException e1) {
+ throw new UndeclaredThrowableException(e1);
+ }
+ } else if (se.getCause() instanceof YarnRemoteException) {
+ return (YarnRemoteException)se.getCause();
+ } else if (se.getCause() instanceof UndeclaredThrowableException) {
+ throw (UndeclaredThrowableException)se.getCause();
+ } else {
+ throw new UndeclaredThrowableException(se);
+ }
+ }
}
\ No newline at end of file
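Editor's note (not part of the patch): with ProtobufRpcEngine, an exception raised on the server reaches the client as a RemoteException nested inside a ServiceException, which is why the helper above tries unwrapRemoteException first before falling back to the older cause checks. A minimal caller-side sketch; the PbCall interface and method names are illustrative, not part of the patch.

import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl;

import com.google.protobuf.ServiceException;

public class UnwrapUsageSketch {
  // Stand-in for a proxy.method(null, requestProto) invocation.
  interface PbCall<T> {
    T invoke() throws ServiceException;
  }

  // The pattern each PB client method above follows: run the protobuf call and
  // convert ServiceException back into a typed YarnRemoteException for callers.
  public static <T> T call(PbCall<T> pbCall) throws YarnRemoteException {
    try {
      return pbCall.invoke();
    } catch (ServiceException e) {
      throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
    }
  }
}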
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/AMRMProtocolPBClientImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/AMRMProtocolPBClientImpl.java
index 26ec40b87e..c43863c57b 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/AMRMProtocolPBClientImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/AMRMProtocolPBClientImpl.java
@@ -19,12 +19,13 @@
package org.apache.hadoop.yarn.api.impl.pb.client;
import java.io.IOException;
-import java.lang.reflect.UndeclaredThrowableException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.api.AMRMProtocol;
+import org.apache.hadoop.yarn.api.AMRMProtocolPB;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
@@ -38,8 +39,7 @@
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterResponsePBImpl;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine;
-import org.apache.hadoop.yarn.proto.AMRMProtocol.AMRMProtocolService;
+import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterRequestProto;
@@ -48,12 +48,12 @@
public class AMRMProtocolPBClientImpl implements AMRMProtocol {
- private AMRMProtocolService.BlockingInterface proxy;
+ private AMRMProtocolPB proxy;
public AMRMProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException {
- RPC.setProtocolEngine(conf, AMRMProtocolService.BlockingInterface.class, ProtoOverHadoopRpcEngine.class);
- proxy = (AMRMProtocolService.BlockingInterface)RPC.getProxy(
- AMRMProtocolService.BlockingInterface.class, clientVersion, addr, conf);
+ RPC.setProtocolEngine(conf, AMRMProtocolPB.class, ProtobufRpcEngine.class);
+ proxy = (AMRMProtocolPB)RPC.getProxy(
+ AMRMProtocolPB.class, clientVersion, addr, conf);
}
@@ -64,13 +64,7 @@ public AllocateResponse allocate(AllocateRequest request)
try {
return new AllocateResponsePBImpl(proxy.allocate(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -83,13 +77,7 @@ public FinishApplicationMasterResponse finishApplicationMaster(
try {
return new FinishApplicationMasterResponsePBImpl(proxy.finishApplicationMaster(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -100,13 +88,7 @@ public RegisterApplicationMasterResponse registerApplicationMaster(
try {
return new RegisterApplicationMasterResponsePBImpl(proxy.registerApplicationMaster(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java
index 81333258bd..4167e29b9d 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java
@@ -19,12 +19,13 @@
package org.apache.hadoop.yarn.api.impl.pb.client;
import java.io.IOException;
-import java.lang.reflect.UndeclaredThrowableException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.api.ClientRMProtocol;
+import org.apache.hadoop.yarn.api.ClientRMProtocolPB;
import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
@@ -66,8 +67,7 @@
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine;
-import org.apache.hadoop.yarn.proto.ClientRMProtocol.ClientRMProtocolService;
+import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllApplicationsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsRequestProto;
@@ -83,12 +83,12 @@
public class ClientRMProtocolPBClientImpl implements ClientRMProtocol {
- private ClientRMProtocolService.BlockingInterface proxy;
+ private ClientRMProtocolPB proxy;
public ClientRMProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException {
- RPC.setProtocolEngine(conf, ClientRMProtocolService.BlockingInterface.class, ProtoOverHadoopRpcEngine.class);
- proxy = (ClientRMProtocolService.BlockingInterface)RPC.getProxy(
- ClientRMProtocolService.BlockingInterface.class, clientVersion, addr, conf);
+ RPC.setProtocolEngine(conf, ClientRMProtocolPB.class, ProtobufRpcEngine.class);
+ proxy = (ClientRMProtocolPB)RPC.getProxy(
+ ClientRMProtocolPB.class, clientVersion, addr, conf);
}
@Override
@@ -98,13 +98,7 @@ public KillApplicationResponse forceKillApplication(
try {
return new KillApplicationResponsePBImpl(proxy.forceKillApplication(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -115,13 +109,7 @@ public GetApplicationReportResponse getApplicationReport(
try {
return new GetApplicationReportResponsePBImpl(proxy.getApplicationReport(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -132,13 +120,7 @@ public GetClusterMetricsResponse getClusterMetrics(
try {
return new GetClusterMetricsResponsePBImpl(proxy.getClusterMetrics(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -149,13 +131,7 @@ public GetNewApplicationResponse getNewApplication(
try {
return new GetNewApplicationResponsePBImpl(proxy.getNewApplication(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -166,13 +142,7 @@ public SubmitApplicationResponse submitApplication(
try {
return new SubmitApplicationResponsePBImpl(proxy.submitApplication(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -185,13 +155,7 @@ public GetAllApplicationsResponse getAllApplications(
return new GetAllApplicationsResponsePBImpl(
proxy.getAllApplications(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -204,13 +168,7 @@ public GetClusterNodesResponse getClusterNodes(
return new GetClusterNodesResponsePBImpl(
proxy.getClusterNodes(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -223,13 +181,7 @@ public GetQueueInfoResponse getQueueInfo(GetQueueInfoRequest request)
return new GetQueueInfoResponsePBImpl(
proxy.getQueueInfo(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -242,13 +194,7 @@ public GetQueueUserAclsInfoResponse getQueueUserAcls(
return new GetQueueUserAclsInfoResponsePBImpl(
proxy.getQueueUserAcls(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -261,13 +207,7 @@ public GetDelegationTokenResponse getDelegationToken(
return new GetDelegationTokenResponsePBImpl(
proxy.getDelegationToken(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
}
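Editor's note: the proxy above is now typed against ClientRMProtocolPB, whose definition is not part of this section. Judging from the ResourceTrackerPB and LocalizationProtocolPB files added later in the patch, it is presumably just a @ProtocolInfo-annotated marker over the generated blocking interface, roughly:

    // Presumed shape of ClientRMProtocolPB, following the ResourceTrackerPB /
    // LocalizationProtocolPB pattern shown further down; not taken from this hunk.
    package org.apache.hadoop.yarn.api;

    import org.apache.hadoop.ipc.ProtocolInfo;
    import org.apache.hadoop.yarn.proto.ClientRMProtocol.ClientRMProtocolService;

    @ProtocolInfo(
        protocolName = "org.apache.hadoop.yarn.api.ClientRMProtocolPB",
        protocolVersion = 1)
    public interface ClientRMProtocolPB extends ClientRMProtocolService.BlockingInterface {
    }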
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagerPBClientImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagerPBClientImpl.java
index 34ad56073e..e97accedcd 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagerPBClientImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagerPBClientImpl.java
@@ -19,12 +19,13 @@
package org.apache.hadoop.yarn.api.impl.pb.client;
import java.io.IOException;
-import java.lang.reflect.UndeclaredThrowableException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.api.ContainerManager;
+import org.apache.hadoop.yarn.api.ContainerManagerPB;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
@@ -38,8 +39,7 @@
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainerRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainerResponsePBImpl;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine;
-import org.apache.hadoop.yarn.proto.ContainerManager.ContainerManagerService;
+import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainerRequestProto;
@@ -48,12 +48,12 @@
public class ContainerManagerPBClientImpl implements ContainerManager {
- private ContainerManagerService.BlockingInterface proxy;
+ private ContainerManagerPB proxy;
public ContainerManagerPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException {
- RPC.setProtocolEngine(conf, ContainerManagerService.BlockingInterface.class, ProtoOverHadoopRpcEngine.class);
- proxy = (ContainerManagerService.BlockingInterface)RPC.getProxy(
- ContainerManagerService.BlockingInterface.class, clientVersion, addr, conf);
+ RPC.setProtocolEngine(conf, ContainerManagerPB.class, ProtobufRpcEngine.class);
+ proxy = (ContainerManagerPB)RPC.getProxy(
+ ContainerManagerPB.class, clientVersion, addr, conf);
}
public void close() {
@@ -69,13 +69,7 @@ public GetContainerStatusResponse getContainerStatus(
try {
return new GetContainerStatusResponsePBImpl(proxy.getContainerStatus(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -86,31 +80,20 @@ public StartContainerResponse startContainer(StartContainerRequest request)
try {
return new StartContainerResponsePBImpl(proxy.startContainer(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@Override
public StopContainerResponse stopContainer(StopContainerRequest request)
throws YarnRemoteException {
- StopContainerRequestProto requestProto = ((StopContainerRequestPBImpl)request).getProto();
+ StopContainerRequestProto requestProto = ((StopContainerRequestPBImpl) request)
+ .getProto();
try {
- return new StopContainerResponsePBImpl(proxy.stopContainer(null, requestProto));
+ return new StopContainerResponsePBImpl(proxy.stopContainer(null,
+ requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
-
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/AMRMProtocolPBServiceImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/AMRMProtocolPBServiceImpl.java
index bc8f695cdb..4211690ffc 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/AMRMProtocolPBServiceImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/AMRMProtocolPBServiceImpl.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.yarn.api.impl.pb.service;
import org.apache.hadoop.yarn.api.AMRMProtocol;
+import org.apache.hadoop.yarn.api.AMRMProtocolPB;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
@@ -29,7 +30,6 @@
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterResponsePBImpl;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.proto.AMRMProtocol.AMRMProtocolService.BlockingInterface;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterRequestProto;
@@ -40,7 +40,7 @@
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
-public class AMRMProtocolPBServiceImpl implements BlockingInterface {
+public class AMRMProtocolPBServiceImpl implements AMRMProtocolPB {
private AMRMProtocol real;
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java
index 7bece03657..2f0e89c5c2 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.yarn.api.impl.pb.service;
import org.apache.hadoop.yarn.api.ClientRMProtocol;
+import org.apache.hadoop.yarn.api.ClientRMProtocolPB;
import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
@@ -50,7 +51,6 @@
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.proto.ClientRMProtocol.ClientRMProtocolService.BlockingInterface;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllApplicationsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllApplicationsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProto;
@@ -75,7 +75,7 @@
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
-public class ClientRMProtocolPBServiceImpl implements BlockingInterface {
+public class ClientRMProtocolPBServiceImpl implements ClientRMProtocolPB {
private ClientRMProtocol real;
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ContainerManagerPBServiceImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ContainerManagerPBServiceImpl.java
index 5f3cf17383..4b0af8156d 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ContainerManagerPBServiceImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ContainerManagerPBServiceImpl.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.yarn.api.impl.pb.service;
import org.apache.hadoop.yarn.api.ContainerManager;
+import org.apache.hadoop.yarn.api.ContainerManagerPB;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse;
@@ -29,7 +30,6 @@
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainerRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainerResponsePBImpl;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.proto.ContainerManager.ContainerManagerService.BlockingInterface;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerRequestProto;
@@ -40,7 +40,7 @@
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
-public class ContainerManagerPBServiceImpl implements BlockingInterface {
+public class ContainerManagerPBServiceImpl implements ContainerManagerPB {
private ContainerManager real;
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
index f1f2892162..0ea9d1c65e 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
@@ -26,19 +26,23 @@
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.factories.RpcServerFactory;
-import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine;
import com.google.protobuf.BlockingService;
public class RpcServerFactoryPBImpl implements RpcServerFactory {
+ private static final Log LOG = LogFactory.getLog(RpcServerFactoryPBImpl.class);
private static final String PROTO_GEN_PACKAGE_NAME = "org.apache.hadoop.yarn.proto";
private static final String PROTO_GEN_CLASS_SUFFIX = "Service";
private static final String PB_IMPL_PACKAGE_SUFFIX = "impl.pb.service";
@@ -96,6 +100,7 @@ public Server getServer(Class<?> protocol, Object instance,
throw new YarnException(e);
}
+ Class<?> pbProtocol = service.getClass().getInterfaces()[0];
Method method = protoCache.get(protocol);
if (method == null) {
Class<?> protoClazz = null;
@@ -106,7 +111,8 @@ public Server getServer(Class> protocol, Object instance,
+ getProtoClassName(protocol) + "]", e);
}
try {
- method = protoClazz.getMethod("newReflectiveBlockingService", service.getClass().getInterfaces()[0]);
+ method = protoClazz.getMethod("newReflectiveBlockingService",
+ pbProtocol.getInterfaces()[0]);
method.setAccessible(true);
protoCache.putIfAbsent(protocol, method);
} catch (NoSuchMethodException e) {
@@ -115,7 +121,7 @@ public Server getServer(Class<?> protocol, Object instance,
}
try {
- return createServer(addr, conf, secretManager, numHandlers,
+ return createServer(pbProtocol, addr, conf, secretManager, numHandlers,
(BlockingService)method.invoke(null, service));
} catch (InvocationTargetException e) {
throw new YarnException(e);
@@ -148,13 +154,15 @@ private String getPackageName(Class<?> clazz) {
return clazz.getPackage().getName();
}
- private Server createServer(InetSocketAddress addr, Configuration conf,
+ private Server createServer(Class<?> pbProtocol, InetSocketAddress addr, Configuration conf,
SecretManager<? extends TokenIdentifier> secretManager, int numHandlers,
BlockingService blockingService) throws IOException {
- RPC.setProtocolEngine(conf, BlockingService.class, ProtoOverHadoopRpcEngine.class);
- Server server = RPC.getServer(BlockingService.class, blockingService,
+ RPC.setProtocolEngine(conf, pbProtocol, ProtobufRpcEngine.class);
+ RPC.Server server = RPC.getServer(pbProtocol, blockingService,
addr.getHostName(), addr.getPort(), numHandlers, false, conf,
secretManager);
+ LOG.info("Adding protocol "+pbProtocol.getCanonicalName()+" to the server");
+ server.addProtocol(RpcKind.RPC_PROTOCOL_BUFFER, pbProtocol, blockingService);
return server;
}
}
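Editor's note: written out concretely for a single protocol, the reflective wiring the factory now performs reads roughly as below. AMRMProtocolPB is used as the example, and the AMRMProtocolPBServiceImpl constructor taking the real AMRMProtocol is an assumption; the RPC calls themselves appear verbatim in the hunk above.

    // Illustrative, non-reflective version of what RpcServerFactoryPBImpl does.
    // Assumes org.apache.hadoop.yarn.proto.AMRMProtocol.AMRMProtocolService and
    // an AMRMProtocolPBServiceImpl(AMRMProtocol) constructor; amrmImpl is hypothetical.
    BlockingService service = AMRMProtocolService
        .newReflectiveBlockingService(new AMRMProtocolPBServiceImpl(amrmImpl));
    RPC.setProtocolEngine(conf, AMRMProtocolPB.class, ProtobufRpcEngine.class);
    RPC.Server server = RPC.getServer(AMRMProtocolPB.class, service,
        addr.getHostName(), addr.getPort(), numHandlers, false, conf, secretManager);
    // Explicitly register the protocol for protobuf RPC on the shared server.
    server.addProtocol(RpcKind.RPC_PROTOCOL_BUFFER, AMRMProtocolPB.class, service);
    server.start();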
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java
deleted file mode 100644
index ca65a27beb..0000000000
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java
+++ /dev/null
@@ -1,404 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.ipc;
-
-import java.io.Closeable;
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.lang.reflect.InvocationHandler;
-import java.lang.reflect.Method;
-import java.lang.reflect.Proxy;
-import java.net.InetSocketAddress;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-import javax.net.SocketFactory;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.DataOutputOutputStream;
-import org.apache.hadoop.io.ObjectWritable;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.ProtocolMetaInfoPB;
-import org.apache.hadoop.ipc.ProtocolProxy;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RpcEngine;
-import org.apache.hadoop.ipc.ClientCache;
-import org.apache.hadoop.ipc.Client.ConnectionId;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.SecretManager;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.util.ProtoUtil;
-import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl;
-import org.apache.hadoop.yarn.ipc.RpcProtos.ProtoSpecificRpcRequest;
-import org.apache.hadoop.yarn.ipc.RpcProtos.ProtoSpecificRpcResponse;
-
-import com.google.protobuf.BlockingService;
-import com.google.protobuf.Descriptors.MethodDescriptor;
-import com.google.protobuf.Message;
-import com.google.protobuf.ServiceException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-
-
-@InterfaceStability.Evolving
-public class ProtoOverHadoopRpcEngine implements RpcEngine {
- private static final Log LOG = LogFactory.getLog(RPC.class);
-
- private static final ClientCache CLIENTS=new ClientCache();
-
- @Override
- @SuppressWarnings("unchecked")
- public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
- InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
- SocketFactory factory, int rpcTimeout) throws IOException {
- return new ProtocolProxy<T>(protocol, (T) Proxy.newProxyInstance(protocol
- .getClassLoader(), new Class[] { protocol }, new Invoker(protocol,
- addr, ticket, conf, factory, rpcTimeout)), false);
- }
-
- @Override
- public ProtocolProxy<ProtocolMetaInfoPB> getProtocolMetaInfoProxy(
- ConnectionId connId, Configuration conf, SocketFactory factory)
- throws IOException {
- Class<ProtocolMetaInfoPB> protocol = ProtocolMetaInfoPB.class;
- return new ProtocolProxy<ProtocolMetaInfoPB>(protocol,
- (ProtocolMetaInfoPB) Proxy.newProxyInstance(protocol.getClassLoader(),
- new Class[] { protocol }, new Invoker(protocol, connId, conf,
- factory)), false);
- }
-
- private static class Invoker implements InvocationHandler, Closeable {
- private Map<String, Message> returnTypes = new ConcurrentHashMap<String, Message>();
- private boolean isClosed = false;
- private Client.ConnectionId remoteId;
- private Client client;
-
- public Invoker(Class<?> protocol, InetSocketAddress addr,
- UserGroupInformation ticket, Configuration conf, SocketFactory factory,
- int rpcTimeout) throws IOException {
- this(protocol, Client.ConnectionId.getConnectionId(addr, protocol,
- ticket, rpcTimeout, conf), conf, factory);
- }
-
- public Invoker(Class<?> protocol, Client.ConnectionId connId,
- Configuration conf, SocketFactory factory) {
- this.remoteId = connId;
- this.client = CLIENTS.getClient(conf, factory,
- ProtoSpecificResponseWritable.class);
- }
-
- private ProtoSpecificRpcRequest constructRpcRequest(Method method,
- Object[] params) throws ServiceException {
- ProtoSpecificRpcRequest rpcRequest;
- ProtoSpecificRpcRequest.Builder builder;
-
- builder = ProtoSpecificRpcRequest.newBuilder();
- builder.setMethodName(method.getName());
-
- if (params.length != 2) { // RpcController + Message
- throw new ServiceException("Too many parameters for request. Method: ["
- + method.getName() + "]" + ", Expected: 2, Actual: "
- + params.length);
- }
- if (params[1] == null) {
- throw new ServiceException("null param while calling Method: ["
- + method.getName() + "]");
- }
-
- Message param = (Message) params[1];
- builder.setRequestProto(param.toByteString());
-
- rpcRequest = builder.build();
- return rpcRequest;
- }
-
- @Override
- public Object invoke(Object proxy, Method method, Object[] args)
- throws Throwable {
- long startTime = 0;
- if (LOG.isDebugEnabled()) {
- startTime = System.currentTimeMillis();
- }
-
- ProtoSpecificRpcRequest rpcRequest = constructRpcRequest(method, args);
- ProtoSpecificResponseWritable val = null;
- try {
- val = (ProtoSpecificResponseWritable) client.call(
- new ProtoSpecificRequestWritable(rpcRequest), remoteId);
- } catch (Exception e) {
- throw new ServiceException(e);
- }
-
- ProtoSpecificRpcResponse response = val.message;
-
- if (LOG.isDebugEnabled()) {
- long callTime = System.currentTimeMillis() - startTime;
- LOG.debug("Call: " + method.getName() + " " + callTime);
- }
-
- if (response.hasIsError() && response.getIsError() == true) {
- YarnRemoteExceptionPBImpl exception = new YarnRemoteExceptionPBImpl(response.getException());
- exception.fillInStackTrace();
- ServiceException se = new ServiceException(exception);
- throw se;
- }
-
- Message prototype = null;
- try {
- prototype = getReturnProtoType(method);
- } catch (Exception e) {
- throw new ServiceException(e);
- }
- Message actualReturnMessage = prototype.newBuilderForType()
- .mergeFrom(response.getResponseProto()).build();
- return actualReturnMessage;
- }
-
- @Override
- public void close() throws IOException {
- if (!isClosed) {
- isClosed = true;
- CLIENTS.stopClient(client);
- }
- }
-
- private Message getReturnProtoType(Method method) throws Exception {
- if (returnTypes.containsKey(method.getName())) {
- return returnTypes.get(method.getName());
- } else {
- Class<?> returnType = method.getReturnType();
-
- Method newInstMethod = returnType.getMethod("getDefaultInstance");
- newInstMethod.setAccessible(true);
- Message prototype = (Message) newInstMethod.invoke(null,
- (Object[]) null);
- returnTypes.put(method.getName(), prototype);
- return prototype;
- }
- }
- }
-
- /**
- * Writable Wrapper for Protocol Buffer Requests
- */
- private static class ProtoSpecificRequestWritable implements Writable {
- ProtoSpecificRpcRequest message;
-
- @SuppressWarnings("unused")
- public ProtoSpecificRequestWritable() {
- }
-
- ProtoSpecificRequestWritable(ProtoSpecificRpcRequest message) {
- this.message = message;
- }
-
- @Override
- public void write(DataOutput out) throws IOException {
- ((Message)message).writeDelimitedTo(
- DataOutputOutputStream.constructOutputStream(out));
- }
-
- @Override
- public void readFields(DataInput in) throws IOException {
- int length = ProtoUtil.readRawVarint32(in);
- byte[] bytes = new byte[length];
- in.readFully(bytes);
- message = ProtoSpecificRpcRequest.parseFrom(bytes);
- }
- }
-
- /**
- * Writable Wrapper for Protocol Buffer Responses
- */
- public static class ProtoSpecificResponseWritable implements Writable {
- ProtoSpecificRpcResponse message;
-
- public ProtoSpecificResponseWritable() {
- }
-
- public ProtoSpecificResponseWritable(ProtoSpecificRpcResponse message) {
- this.message = message;
- }
-
- @Override
- public void write(DataOutput out) throws IOException {
- ((Message)message).writeDelimitedTo(
- DataOutputOutputStream.constructOutputStream(out));
- }
-
- @Override
- public void readFields(DataInput in) throws IOException {
- int length = ProtoUtil.readRawVarint32(in);
- byte[] bytes = new byte[length];
- in.readFully(bytes);
- message = ProtoSpecificRpcResponse.parseFrom(bytes);
- }
- }
-
- @Override
- public Object[] call(Method method, Object[][] params,
- InetSocketAddress[] addrs, UserGroupInformation ticket, Configuration conf)
- throws IOException, InterruptedException {
- throw new UnsupportedOperationException();
- }
-
- // for unit testing only
- @InterfaceAudience.Private
- @InterfaceStability.Unstable
- static Client getClient(Configuration conf) {
- return CLIENTS.getClient(conf, SocketFactory.getDefault(),
- ProtoSpecificResponseWritable.class);
- }
-
- public static class Server extends RPC.Server {
-
- private BlockingService service;
- private boolean verbose;
-//
-// /**
-// * Construct an RPC server.
-// *
-// * @param instance
-// * the instance whose methods will be called
-// * @param conf
-// * the configuration to use
-// * @param bindAddress
-// * the address to bind on to listen for connection
-// * @param port
-// * the port to listen for connections on
-// */
-// public Server(Object instance, Configuration conf, String bindAddress,
-// int port) throws IOException {
-// this(instance, conf, bindAddress, port, 1, false, null);
-// }
-
- private static String classNameBase(String className) {
- String[] names = className.split("\\.", -1);
- if (names == null || names.length == 0) {
- return className;
- }
- return names[names.length - 1];
- }
-
- /**
- * Construct an RPC server.
- *
- * @param instance
- * the instance whose methods will be called
- * @param conf
- * the configuration to use
- * @param bindAddress
- * the address to bind on to listen for connection
- * @param port
- * the port to listen for connections on
- * @param numHandlers
- * the number of method handler threads to run
- * @param verbose
- * whether each call should be logged
- */
- public Server(Object instance, Configuration conf, String bindAddress,
- int port, int numHandlers, int numReaders,
- int queueSizePerHandler, boolean verbose,
- SecretManager<? extends TokenIdentifier> secretManager)
- throws IOException {
- super(bindAddress, port, ProtoSpecificRequestWritable.class, numHandlers,
- numReaders, queueSizePerHandler, conf, classNameBase(instance.getClass().getName()), secretManager);
- this.service = (BlockingService) instance;
- this.verbose = verbose;
- }
-
- @Override
- public Writable call(RpcKind rpcKind, String protocol,
- Writable writableRequest, long receiveTime) throws IOException {
- ProtoSpecificRequestWritable request = (ProtoSpecificRequestWritable) writableRequest;
- ProtoSpecificRpcRequest rpcRequest = request.message;
- String methodName = rpcRequest.getMethodName();
- if (verbose) {
- log("Call: protocol=" + protocol + ", method="
- + methodName);
- }
- MethodDescriptor methodDescriptor = service.getDescriptorForType()
- .findMethodByName(methodName);
- if (methodDescriptor == null) {
- String msg = "Unknown method " + methodName + " called on "
- + protocol + " protocol.";
- LOG.warn(msg);
- return handleException(new IOException(msg));
- }
- Message prototype = service.getRequestPrototype(methodDescriptor);
- Message param = prototype.newBuilderForType()
- .mergeFrom(rpcRequest.getRequestProto()).build();
- Message result;
- try {
- result = service.callBlockingMethod(methodDescriptor, null, param);
- } catch (ServiceException e) {
- e.printStackTrace();
- return handleException(e);
- } catch (Exception e) {
- return handleException(e);
- }
-
- ProtoSpecificRpcResponse response = constructProtoSpecificRpcSuccessResponse(result);
- return new ProtoSpecificResponseWritable(response);
- }
-
- private ProtoSpecificResponseWritable handleException(Throwable e) {
- ProtoSpecificRpcResponse.Builder builder = ProtoSpecificRpcResponse
- .newBuilder();
- builder.setIsError(true);
- if (e.getCause() instanceof YarnRemoteExceptionPBImpl) {
- builder.setException(((YarnRemoteExceptionPBImpl) e.getCause())
- .getProto());
- } else {
- builder.setException(new YarnRemoteExceptionPBImpl(e).getProto());
- }
- ProtoSpecificRpcResponse response = builder.build();
- return new ProtoSpecificResponseWritable(response);
- }
-
- private ProtoSpecificRpcResponse constructProtoSpecificRpcSuccessResponse(
- Message message) {
- ProtoSpecificRpcResponse res = ProtoSpecificRpcResponse.newBuilder()
- .setResponseProto(message.toByteString()).build();
- return res;
- }
- }
-
- private static void log(String value) {
- if (value != null && value.length() > 55)
- value = value.substring(0, 55) + "...";
- LOG.info(value);
- }
-
- @Override
- public RPC.Server getServer(Class<?> protocol, Object instance,
- String bindAddress, int port, int numHandlers,int numReaders,
- int queueSizePerHandler, boolean verbose,
- Configuration conf, SecretManager<? extends TokenIdentifier> secretManager)
- throws IOException {
- return new Server(instance, conf, bindAddress, port, numHandlers, numReaders, queueSizePerHandler,
- verbose, secretManager);
- }
-}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerManagerSecurityInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerManagerSecurityInfo.java
index aaf5ff0be7..c7112e3c01 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerManagerSecurityInfo.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerManagerSecurityInfo.java
@@ -26,7 +26,7 @@
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.security.token.TokenSelector;
-import org.apache.hadoop.yarn.proto.ContainerManager;
+import org.apache.hadoop.yarn.api.ContainerManagerPB;
public class ContainerManagerSecurityInfo extends SecurityInfo {
@@ -38,7 +38,7 @@ public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
@Override
public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
if (!protocol
- .equals(ContainerManager.ContainerManagerService.BlockingInterface.class)) {
+ .equals(ContainerManagerPB.class)) {
return null;
}
return new TokenInfo() {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/SchedulerSecurityInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/SchedulerSecurityInfo.java
index 9f63b5f1aa..583e2f46e1 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/SchedulerSecurityInfo.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/SchedulerSecurityInfo.java
@@ -26,7 +26,7 @@
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.security.token.TokenSelector;
-import org.apache.hadoop.yarn.proto.AMRMProtocol;
+import org.apache.hadoop.yarn.api.AMRMProtocolPB;
public class SchedulerSecurityInfo extends SecurityInfo {
@@ -37,7 +37,7 @@ public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
@Override
public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
- if (!protocol.equals(AMRMProtocol.AMRMProtocolService.BlockingInterface.class)) {
+ if (!protocol.equals(AMRMProtocolPB.class)) {
return null;
}
return new TokenInfo() {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientRMSecurityInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientRMSecurityInfo.java
index 0a21c902b5..0e1e6781a5 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientRMSecurityInfo.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientRMSecurityInfo.java
@@ -26,15 +26,15 @@
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.security.token.TokenSelector;
+import org.apache.hadoop.yarn.api.ClientRMProtocolPB;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.proto.ClientRMProtocol;
public class ClientRMSecurityInfo extends SecurityInfo {
@Override
public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
if (!protocol
- .equals(ClientRMProtocol.ClientRMProtocolService.BlockingInterface.class)) {
+ .equals(ClientRMProtocolPB.class)) {
return null;
}
return new KerberosInfo() {
@@ -59,7 +59,7 @@ public String clientPrincipal() {
@Override
public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
if (!protocol
- .equals(ClientRMProtocol.ClientRMProtocolService.BlockingInterface.class)) {
+ .equals(ClientRMProtocolPB.class)) {
return null;
}
return new TokenInfo() {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/proxy/.keep b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/proxy/.keep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 0a419a4488..686cb49210 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -477,8 +477,7 @@
<description>The address for the web proxy as HOST:PORT, if this is not
- given or if it matches yarn.resourcemanager.address then the proxy will
- run as part of the RM</description>
+ given then the proxy will run as part of the RM</description>
<name>yarn.web-proxy.address</name>
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
index e007ad6fc6..2c127cc6a2 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
@@ -22,11 +22,14 @@
import junit.framework.Assert;
-import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.api.ClientRMProtocol;
import org.apache.hadoop.yarn.api.ContainerManager;
+import org.apache.hadoop.yarn.api.ContainerManagerPB;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
@@ -84,6 +87,8 @@ public void testUnknownCall() {
"Unknown method getNewApplication called on.*"
+ "org.apache.hadoop.yarn.proto.ClientRMProtocol"
+ "\\$ClientRMProtocolService\\$BlockingInterface protocol."));
+ } catch (Exception e) {
+ e.printStackTrace();
}
}
@@ -101,6 +106,7 @@ private void test(String rpcClass) throws Exception {
Server server = rpc.getServer(ContainerManager.class,
new DummyContainerManager(), addr, conf, null, 1);
server.start();
+ RPC.setProtocolEngine(conf, ContainerManagerPB.class, ProtobufRpcEngine.class);
ContainerManager proxy = (ContainerManager)
rpc.getProxy(ContainerManager.class,
NetUtils.createSocketAddr("localhost:" + server.getPort()), conf);
@@ -144,11 +150,11 @@ private void test(String rpcClass) throws Exception {
proxy.stopContainer(stopRequest);
} catch (YarnRemoteException e) {
exception = true;
- System.err.println(e.getMessage());
- System.err.println(e.getCause().getMessage());
- Assert.assertTrue(EXCEPTION_MSG.equals(e.getMessage()));
- Assert.assertTrue(EXCEPTION_CAUSE.equals(e.getCause().getMessage()));
+ Assert.assertTrue(e.getMessage().contains(EXCEPTION_MSG));
+ Assert.assertTrue(e.getMessage().contains(EXCEPTION_CAUSE));
System.out.println("Test Exception is " + RPCUtil.toString(e));
+ } catch (Exception ex) {
+ ex.printStackTrace();
}
Assert.assertTrue(exception);
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/RMNMSecurityInfoClass.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/RMNMSecurityInfoClass.java
index 1b23b77322..f4940398fe 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/RMNMSecurityInfoClass.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/RMNMSecurityInfoClass.java
@@ -25,13 +25,13 @@
import org.apache.hadoop.security.SecurityInfo;
import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.proto.ResourceTracker;
+import org.apache.hadoop.yarn.server.api.ResourceTrackerPB;
public class RMNMSecurityInfoClass extends SecurityInfo {
@Override
public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
- if (!protocol.equals(ResourceTracker.ResourceTrackerService.BlockingInterface.class)) {
+ if (!protocol.equals(ResourceTrackerPB.class)) {
return null;
}
return new KerberosInfo() {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ResourceTrackerPB.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ResourceTrackerPB.java
new file mode 100644
index 0000000000..840976c805
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ResourceTrackerPB.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.api;
+
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.yarn.proto.ResourceTracker.ResourceTrackerService;
+
+@ProtocolInfo(
+ protocolName = "org.apache.hadoop.yarn.server.api.ResourceTrackerPB",
+ protocolVersion = 1)
+public interface ResourceTrackerPB extends ResourceTrackerService.BlockingInterface {
+
+}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java
index d2160d1977..88c3b0d524 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java
@@ -19,17 +19,17 @@
package org.apache.hadoop.yarn.server.api.impl.pb.client;
import java.io.IOException;
-import java.lang.reflect.UndeclaredThrowableException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine;
-import org.apache.hadoop.yarn.proto.ResourceTracker.ResourceTrackerService;
+import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerRequestProto;
import org.apache.hadoop.yarn.server.api.ResourceTracker;
+import org.apache.hadoop.yarn.server.api.ResourceTrackerPB;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
@@ -43,12 +43,12 @@
public class ResourceTrackerPBClientImpl implements ResourceTracker {
-private ResourceTrackerService.BlockingInterface proxy;
+private ResourceTrackerPB proxy;
public ResourceTrackerPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException {
- RPC.setProtocolEngine(conf, ResourceTrackerService.BlockingInterface.class, ProtoOverHadoopRpcEngine.class);
- proxy = (ResourceTrackerService.BlockingInterface)RPC.getProxy(
- ResourceTrackerService.BlockingInterface.class, clientVersion, addr, conf);
+ RPC.setProtocolEngine(conf, ResourceTrackerPB.class, ProtobufRpcEngine.class);
+ proxy = (ResourceTrackerPB)RPC.getProxy(
+ ResourceTrackerPB.class, clientVersion, addr, conf);
}
@Override
@@ -58,13 +58,7 @@ public RegisterNodeManagerResponse registerNodeManager(
try {
return new RegisterNodeManagerResponsePBImpl(proxy.registerNodeManager(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -75,13 +69,7 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
try {
return new NodeHeartbeatResponsePBImpl(proxy.nodeHeartbeat(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceTrackerPBServiceImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceTrackerPBServiceImpl.java
index d4d20bf59b..18c5dcb61b 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceTrackerPBServiceImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceTrackerPBServiceImpl.java
@@ -19,12 +19,12 @@
package org.apache.hadoop.yarn.server.api.impl.pb.service;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.proto.ResourceTracker.ResourceTrackerService.BlockingInterface;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerResponseProto;
import org.apache.hadoop.yarn.server.api.ResourceTracker;
+import org.apache.hadoop.yarn.server.api.ResourceTrackerPB;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatRequestPBImpl;
@@ -35,7 +35,7 @@
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
-public class ResourceTrackerPBServiceImpl implements BlockingInterface {
+public class ResourceTrackerPBServiceImpl implements ResourceTrackerPB {
private ResourceTracker real;
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/LocalizationProtocolPB.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/LocalizationProtocolPB.java
new file mode 100644
index 0000000000..4bfa9f22c9
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/LocalizationProtocolPB.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.api;
+
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.yarn.proto.LocalizationProtocol.LocalizationProtocolService;
+
+@ProtocolInfo(protocolName = "org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocolPB",
+ protocolVersion = 1)
+public interface LocalizationProtocolPB extends LocalizationProtocolService.BlockingInterface {
+
+}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/client/LocalizationProtocolPBClientImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/client/LocalizationProtocolPBClientImpl.java
index 1cd981cfee..80b3f79869 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/client/LocalizationProtocolPBClientImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/client/LocalizationProtocolPBClientImpl.java
@@ -18,32 +18,31 @@
package org.apache.hadoop.yarn.server.nodemanager.api.impl.pb.client;
import java.io.IOException;
-
-import java.lang.reflect.UndeclaredThrowableException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine;
-import org.apache.hadoop.yarn.proto.LocalizationProtocol.LocalizationProtocolService;
+import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerStatusProto;
import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol;
+import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocolPB;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb.LocalizerHeartbeatResponsePBImpl;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb.LocalizerStatusPBImpl;
-import static org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerStatusProto;
import com.google.protobuf.ServiceException;
public class LocalizationProtocolPBClientImpl implements LocalizationProtocol {
- private LocalizationProtocolService.BlockingInterface proxy;
+ private LocalizationProtocolPB proxy;
public LocalizationProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException {
- RPC.setProtocolEngine(conf, LocalizationProtocolService.BlockingInterface.class, ProtoOverHadoopRpcEngine.class);
- proxy = (LocalizationProtocolService.BlockingInterface)RPC.getProxy(
- LocalizationProtocolService.BlockingInterface.class, clientVersion, addr, conf);
+ RPC.setProtocolEngine(conf, LocalizationProtocolPB.class, ProtobufRpcEngine.class);
+ proxy = (LocalizationProtocolPB)RPC.getProxy(
+ LocalizationProtocolPB.class, clientVersion, addr, conf);
}
@Override
@@ -54,13 +53,7 @@ public LocalizerHeartbeatResponse heartbeat(LocalizerStatus status)
return new LocalizerHeartbeatResponsePBImpl(
proxy.heartbeat(null, statusProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/service/LocalizationProtocolPBServiceImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/service/LocalizationProtocolPBServiceImpl.java
index d69a4f95c3..31111d30f3 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/service/LocalizationProtocolPBServiceImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/service/LocalizationProtocolPBServiceImpl.java
@@ -24,13 +24,13 @@
import com.google.protobuf.ServiceException;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.proto.LocalizationProtocol.LocalizationProtocolService.BlockingInterface;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerHeartbeatResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerStatusProto;
import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol;
+import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocolPB;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
-public class LocalizationProtocolPBServiceImpl implements BlockingInterface {
+public class LocalizationProtocolPBServiceImpl implements LocalizationProtocolPB {
private LocalizationProtocol real;
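
The service-side class delegates to the concrete LocalizationProtocol held in the "real" field and translates between the generated protos and their PB record wrappers. The heartbeat method itself is outside this hunk; a rough sketch of what it typically looks like, with the wrapper and controller types assumed from the imports used elsewhere in the patch:

    // Illustrative only; the actual method body is not part of this hunk.
    @Override
    public LocalizerHeartbeatResponseProto heartbeat(RpcController controller,
        LocalizerStatusProto proto) throws ServiceException {
      // Wrap the raw proto in its PB record before delegating to the real protocol.
      LocalizerStatusPBImpl request = new LocalizerStatusPBImpl(proto);
      try {
        LocalizerHeartbeatResponse response = real.heartbeat(request);
        // Unwrap the PB record back into the proto the RPC layer expects.
        return ((LocalizerHeartbeatResponsePBImpl) response).getProto();
      } catch (YarnRemoteException e) {
        throw new ServiceException(e);
      }
    }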
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerSecurityInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerSecurityInfo.java
index 050b9922a3..04fec51241 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerSecurityInfo.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerSecurityInfo.java
@@ -26,7 +26,7 @@
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.security.token.TokenSelector;
-import org.apache.hadoop.yarn.proto.LocalizationProtocol;
+import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocolPB;
public class LocalizerSecurityInfo extends SecurityInfo {
@@ -38,7 +38,7 @@ public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
@Override
public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
if (!protocol
- .equals(LocalizationProtocol.LocalizationProtocolService.BlockingInterface.class)) {
+ .equals(LocalizationProtocolPB.class)) {
return null;
}
return new TokenInfo() {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java
index 0f818bd3b2..11c470edf8 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java
@@ -21,9 +21,9 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;
-import org.apache.hadoop.yarn.proto.ContainerManager;
+import org.apache.hadoop.yarn.api.ContainerManagerPB;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.proto.LocalizationProtocol;
+import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocolPB;
/**
* {@link PolicyProvider} for YARN NodeManager protocols.
@@ -36,9 +36,9 @@ public class NMPolicyProvider extends PolicyProvider {
new Service[] {
new Service(
YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_CONTAINER_MANAGER,
- ContainerManager.ContainerManagerService.BlockingInterface.class),
+ ContainerManagerPB.class),
new Service(YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCE_LOCALIZER,
- LocalizationProtocol.LocalizationProtocolService.BlockingInterface.class)
+ LocalizationProtocolPB.class)
};
@Override
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/RMAdminProtocolPB.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/RMAdminProtocolPB.java
new file mode 100644
index 0000000000..551189463f
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/RMAdminProtocolPB.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.api;
+
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.yarn.proto.RMAdminProtocol.RMAdminProtocolService;
+
+@ProtocolInfo(
+ protocolName = "org.apache.hadoop.yarn.server.nodemanager.api.RMAdminProtocolPB",
+ protocolVersion = 1)
+public interface RMAdminProtocolPB extends RMAdminProtocolService.BlockingInterface {
+
+}
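
With the @ProtocolInfo-annotated PB interface in place, the server side exposes it through ProtobufRpcEngine by registering the reflective BlockingService generated by protoc. A hedged sketch of that wiring; the variable names and the exact server-builder call are illustrative, not taken from this patch:

    // Illustrative only: the PB service impl wraps the real RMAdminProtocol and is
    // exposed as a protobuf BlockingService for the Hadoop IPC server.
    RPC.setProtocolEngine(conf, RMAdminProtocolPB.class, ProtobufRpcEngine.class);
    BlockingService adminService = RMAdminProtocolService
        .newReflectiveBlockingService(new RMAdminProtocolPBServiceImpl(realAdminProtocol));
    // adminService is then registered with the RPC server; the builder/overload
    // details are outside the scope of this hunk.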
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/client/RMAdminProtocolPBClientImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/client/RMAdminProtocolPBClientImpl.java
index cf2ce894ee..80df1b9c8c 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/client/RMAdminProtocolPBClientImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/client/RMAdminProtocolPBClientImpl.java
@@ -23,10 +23,10 @@
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine;
-import org.apache.hadoop.yarn.proto.RMAdminProtocol.RMAdminProtocolService;
+import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto;
@@ -34,6 +34,7 @@
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto;
import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocol;
+import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocolPB;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsRequest;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsResponse;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesRequest;
@@ -64,14 +65,14 @@
public class RMAdminProtocolPBClientImpl implements RMAdminProtocol {
- private RMAdminProtocolService.BlockingInterface proxy;
+ private RMAdminProtocolPB proxy;
public RMAdminProtocolPBClientImpl(long clientVersion, InetSocketAddress addr,
Configuration conf) throws IOException {
- RPC.setProtocolEngine(conf, RMAdminProtocolService.BlockingInterface.class,
- ProtoOverHadoopRpcEngine.class);
- proxy = (RMAdminProtocolService.BlockingInterface)RPC.getProxy(
- RMAdminProtocolService.BlockingInterface.class, clientVersion, addr, conf);
+ RPC.setProtocolEngine(conf, RMAdminProtocolPB.class,
+ ProtobufRpcEngine.class);
+ proxy = (RMAdminProtocolPB)RPC.getProxy(
+ RMAdminProtocolPB.class, clientVersion, addr, conf);
}
@Override
@@ -83,13 +84,7 @@ public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request)
return new RefreshQueuesResponsePBImpl(
proxy.refreshQueues(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -102,13 +97,7 @@ public RefreshNodesResponse refreshNodes(RefreshNodesRequest request)
return new RefreshNodesResponsePBImpl(
proxy.refreshNodes(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -122,13 +111,7 @@ public RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfigu
return new RefreshSuperUserGroupsConfigurationResponsePBImpl(
proxy.refreshSuperUserGroupsConfiguration(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -141,13 +124,7 @@ public RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings(
return new RefreshUserToGroupsMappingsResponsePBImpl(
proxy.refreshUserToGroupsMappings(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -160,13 +137,7 @@ public RefreshAdminAclsResponse refreshAdminAcls(
return new RefreshAdminAclsResponsePBImpl(
proxy.refreshAdminAcls(null, requestProto));
} catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
}
}
@@ -175,18 +146,12 @@ public RefreshServiceAclsResponse refreshServiceAcls(
RefreshServiceAclsRequest request) throws YarnRemoteException {
RefreshServiceAclsRequestProto requestProto =
((RefreshServiceAclsRequestPBImpl)request).getProto();
- try {
- return new RefreshServiceAclsResponsePBImpl(
- proxy.refreshServiceAcls(null, requestProto));
- } catch (ServiceException e) {
- if (e.getCause() instanceof YarnRemoteException) {
- throw (YarnRemoteException)e.getCause();
- } else if (e.getCause() instanceof UndeclaredThrowableException) {
- throw (UndeclaredThrowableException)e.getCause();
- } else {
- throw new UndeclaredThrowableException(e);
- }
- }
+ try {
+ return new RefreshServiceAclsResponsePBImpl(proxy.refreshServiceAcls(
+ null, requestProto));
+ } catch (ServiceException e) {
+ throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e);
+ }
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/service/RMAdminProtocolPBServiceImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/service/RMAdminProtocolPBServiceImpl.java
index f6b6760b53..948e86ee8f 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/service/RMAdminProtocolPBServiceImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/service/RMAdminProtocolPBServiceImpl.java
@@ -19,11 +19,11 @@
package org.apache.hadoop.yarn.server.resourcemanager.api.impl.pb.service;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.proto.RMAdminProtocol.RMAdminProtocolService.BlockingInterface;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.*;
import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocol;
+import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocolPB;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsResponse;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesResponse;
import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesResponse;
@@ -46,7 +46,7 @@
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
-public class RMAdminProtocolPBServiceImpl implements BlockingInterface {
+public class RMAdminProtocolPBServiceImpl implements RMAdminProtocolPB {
private RMAdminProtocol real;
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/admin/AdminSecurityInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/admin/AdminSecurityInfo.java
index 48eda6930a..275da39334 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/admin/AdminSecurityInfo.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/admin/AdminSecurityInfo.java
@@ -25,13 +25,13 @@
import org.apache.hadoop.security.SecurityInfo;
import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.proto.RMAdminProtocol;
+import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocolPB;
public class AdminSecurityInfo extends SecurityInfo {
@Override
public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
- if (!protocol.equals(RMAdminProtocol.RMAdminProtocolService.BlockingInterface.class)) {
+ if (!protocol.equals(RMAdminProtocolPB.class)) {
return null;
}
return new KerberosInfo() {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/authorize/RMPolicyProvider.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/authorize/RMPolicyProvider.java
index 6fe2c1912e..ba58f3e0d3 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/authorize/RMPolicyProvider.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/authorize/RMPolicyProvider.java
@@ -21,12 +21,12 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;
+import org.apache.hadoop.yarn.api.AMRMProtocolPB;
+import org.apache.hadoop.yarn.api.ClientRMProtocolPB;
+import org.apache.hadoop.yarn.api.ContainerManagerPB;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.proto.ContainerManager;
-import org.apache.hadoop.yarn.proto.ResourceTracker;
-import org.apache.hadoop.yarn.proto.RMAdminProtocol;
-import org.apache.hadoop.yarn.proto.ClientRMProtocol;
-import org.apache.hadoop.yarn.proto.AMRMProtocol;
+import org.apache.hadoop.yarn.server.api.ResourceTrackerPB;
+import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocolPB;
/**
* {@link PolicyProvider} for YARN ResourceManager protocols.
@@ -39,19 +39,19 @@ public class RMPolicyProvider extends PolicyProvider {
new Service[] {
new Service(
YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCETRACKER,
- ResourceTracker.ResourceTrackerService.BlockingInterface.class),
+ ResourceTrackerPB.class),
new Service(
YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_CLIENT_RESOURCEMANAGER,
- ClientRMProtocol.ClientRMProtocolService.BlockingInterface.class),
+ ClientRMProtocolPB.class),
new Service(
YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONMASTER_RESOURCEMANAGER,
- AMRMProtocol.AMRMProtocolService.BlockingInterface.class),
+ AMRMProtocolPB.class),
new Service(
YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_ADMIN,
- RMAdminProtocol.RMAdminProtocolService.BlockingInterface.class),
+ RMAdminProtocolPB.class),
new Service(
YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_CONTAINER_MANAGER,
- ContainerManager.ContainerManagerService.BlockingInterface.class),
+ ContainerManagerPB.class),
};
@Override
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java
index d7868d5199..5ac5deef33 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java
@@ -235,10 +235,11 @@ public AMRMProtocol run() {
client.registerApplicationMaster(request);
Assert.fail("Should fail with authorization error");
} catch (YarnRemoteException e) {
- Assert.assertEquals("Unauthorized request from ApplicationMaster. "
- + "Expected ApplicationAttemptID: "
- + applicationAttemptId.toString() + " Found: "
- + otherAppAttemptId.toString(), e.getMessage());
+ Assert.assertTrue(e.getMessage().contains(
+ "Unauthorized request from ApplicationMaster. "
+ + "Expected ApplicationAttemptID: "
+ + applicationAttemptId.toString() + " Found: "
+ + otherAppAttemptId.toString()));
} finally {
rm.stop();
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java
index d86673c6b3..ea27be32da 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java
@@ -323,8 +323,10 @@ private void verifyEnemyAccess() throws Exception {
Assert.fail("App killing by the enemy should fail!!");
} catch (YarnRemoteException e) {
LOG.info("Got exception while killing app as the enemy", e);
- Assert.assertEquals("User enemy cannot perform operation MODIFY_APP on "
- + applicationId, e.getMessage());
+ Assert
+ .assertTrue(e.getMessage().contains(
+ "User enemy cannot perform operation MODIFY_APP on "
+ + applicationId));
}
rmClient.forceKillApplication(finishAppRequest);
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
index 765234665f..226bccded5 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
@@ -247,10 +247,12 @@ public Void run() {
Assert.assertEquals(
java.lang.reflect.UndeclaredThrowableException.class
.getCanonicalName(), e.getClass().getCanonicalName());
- Assert.assertEquals(
- "DIGEST-MD5: digest response format violation. "
- + "Mismatched response.", e.getCause().getCause()
- .getMessage());
+ Assert.assertTrue(e
+ .getCause()
+ .getMessage()
+ .contains(
+ "DIGEST-MD5: digest response format violation. "
+ + "Mismatched response."));
}
return null;
}
@@ -468,9 +470,10 @@ void callWithIllegalContainerID(ContainerManager client,
+ "access is expected to fail.");
} catch (YarnRemoteException e) {
LOG.info("Got exception : ", e);
- Assert.assertEquals("Unauthorized request to start container. "
- + "\nExpected containerId: " + tokenId.getContainerID()
- + " Found: " + newContainerId.toString(), e.getMessage());
+ Assert.assertTrue(e.getMessage().contains(
+ "Unauthorized request to start container. "
+ + "\nExpected containerId: " + tokenId.getContainerID()
+ + " Found: " + newContainerId.toString()));
}
}
diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java
index c90e17c6bf..fad34ab7df 100644
--- a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java
+++ b/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java
@@ -156,10 +156,11 @@ public void map(LongWritable key, BytesWritable value, Context context)
FSDataOutputStream dos =
FileSystem.create(fs, path, new FsPermission((short)0755));
- for (long bytes = key.get(); bytes > 0; bytes -= val.getLength()) {
+ int size = 0;
+ for (long bytes = key.get(); bytes > 0; bytes -= size) {
r.nextBytes(val.getBytes());
- val.setSize((int)Math.min(val.getLength(), bytes));
- dos.write(val.getBytes(), 0, val.getLength());// Write to distCache file
+ size = (int)Math.min(val.getLength(), bytes);
+ dos.write(val.getBytes(), 0, size);// Write to distCache file
}
dos.close();
}
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index b37b0bad95..69dd16bf92 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -283,7 +283,7 @@
      <dependency>
        <groupId>commons-net</groupId>
        <artifactId>commons-net</artifactId>
-       <version>1.4.1</version>
+       <version>3.1</version>
      </dependency>
      <dependency>
        <groupId>javax.servlet</groupId>