diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 154f886ea6..3e43130ad0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1008,6 +1008,8 @@ Release 0.22.0 - Unreleased
     HADOOP-7325. The hadoop command should not accept class names starting
     with a hyphen. (Brock Noland via todd)
 
+    HADOOP-7772. javadoc the topology classes (stevel)
+
   OPTIMIZATIONS
 
     HADOOP-6884. Add LOG.isDebugEnabled() guard for each LOG.debug(..).
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java
index 5bd3ca35de..f29e53cfbf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java
@@ -37,14 +37,18 @@ public class CachedDNSToSwitchMapping implements DNSToSwitchMapping {
   private Map<String, String> cache = new ConcurrentHashMap<String, String>();
   protected DNSToSwitchMapping rawMapping;
-  
+
+  /**
+   * cache a raw DNS mapping
+   * @param rawMapping the raw mapping to cache
+   */
   public CachedDNSToSwitchMapping(DNSToSwitchMapping rawMapping) {
     this.rawMapping = rawMapping;
   }
-  
   /**
-   * Returns the hosts from 'names' that have not been cached previously
+   * @param names a list of hostnames to probe for being cached
+   * @return the hosts from 'names' that have not been cached previously
    */
   private List<String> getUncachedHosts(List<String> names) {
     // find out all names without cached resolved location
@@ -58,7 +62,12 @@ private List<String> getUncachedHosts(List<String> names) {
   }
 
   /**
-   * Caches the resolved hosts
+   * Caches the resolved host:rack mappings. The two list
+   * parameters must be of equal size.
+   *
+   * @param uncachedHosts a list of hosts that were uncached
+   * @param resolvedHosts a list of resolved host entries where the element
+   *  at index(i) is the resolved value for the entry in uncachedHosts[i]
    */
   private void cacheResolvedHosts(List<String> uncachedHosts, 
       List<String> resolvedHosts) {
@@ -71,8 +80,9 @@ private void cacheResolvedHosts(List<String> uncachedHosts,
   }
 
   /**
-   * Returns the cached resolution of the list of hostnames/addresses.
-   * Returns null if any of the names are not currently in the cache
+   * @param names a list of hostnames to look up (can be empty)
+   * @return the cached resolution of the list of hostnames/addresses,
+   *  or null if any of the names are not currently in the cache
    */
   private List<String> getCachedHosts(List<String> names) {
     List<String> result = new ArrayList<String>(names.size());
@@ -88,6 +98,7 @@ private List<String> getCachedHosts(List<String> names) {
     return result;
   }
 
+  @Override
   public List<String> resolve(List<String> names) {
     // normalize all input names to be in the form of IP addresses
     names = NetUtils.normalizeHostNames(names);
@@ -97,12 +108,14 @@ public List<String> resolve(List<String> names) {
       return result;
     }
 
-    List<String> uncachedHosts = this.getUncachedHosts(names);
+    List<String> uncachedHosts = getUncachedHosts(names);
 
     // Resolve the uncached hosts
     List<String> resolvedHosts = rawMapping.resolve(uncachedHosts);
-    this.cacheResolvedHosts(uncachedHosts, resolvedHosts);
-    return this.getCachedHosts(names);
+    //cache them
+    cacheResolvedHosts(uncachedHosts, resolvedHosts);
+    //now look up the entire list in the cache
+    return getCachedHosts(names);
   }
 }
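For illustration only, not part of the patch: a minimal sketch of the caching behaviour documented above. The RecordingMapping class, the /default-rack path and the sample addresses are invented for the example; IP addresses are used so that the normalization step in resolve() leaves the names unchanged. Because the raw mapping is only handed the names that are not yet cached, the second resolve() call below passes it an empty list.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.net.CachedDNSToSwitchMapping;
    import org.apache.hadoop.net.DNSToSwitchMapping;

    public class CachedMappingSketch {

      /** Hypothetical raw mapping that reports every batch it is asked to resolve. */
      static class RecordingMapping implements DNSToSwitchMapping {
        @Override
        public List<String> resolve(List<String> names) {
          System.out.println("raw mapping asked to resolve: " + names);
          List<String> racks = new ArrayList<String>(names.size());
          for (int i = 0; i < names.size(); i++) {
            racks.add("/default-rack");   // one rack path per input name
          }
          return racks;
        }
      }

      public static void main(String[] args) {
        DNSToSwitchMapping cached =
            new CachedDNSToSwitchMapping(new RecordingMapping());

        // First call: both addresses are uncached, so the raw mapping sees them.
        cached.resolve(Arrays.asList("10.0.0.1", "10.0.0.2"));
        // Second call: everything is cached, so the raw mapping is given an empty list.
        cached.resolve(Arrays.asList("10.0.0.1", "10.0.0.2"));
      }
    }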
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java
index f9816becb6..2a832f25f8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java
@@ -23,7 +23,7 @@
 import org.apache.hadoop.classification.InterfaceStability;
 
 /**
- * An interface that should be implemented to allow pluggable
+ * An interface that must be implemented to allow pluggable
  * DNS-name/IP-address to RackID resolvers.
  * 
  */
@@ -40,8 +40,9 @@ public interface DNSToSwitchMapping {
    * Note the hostname/ip-address is not part of the returned path.
    * The network topology of the cluster would determine the number of
    * components in the network path.
-   * @param names
-   * @return list of resolved network paths
+   * @param names the list of hosts to resolve (can be empty)
+   * @return list of resolved network paths.
+   *  If names is empty, the returned list is also empty
    */
   public List<String> resolve(List<String> names);
 }
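For illustration only, not part of the patch: a sketch of a pluggable implementation honouring the contract documented above, returning exactly one network path per input name (and an empty list for an empty input), with the host itself never appearing in the returned path. The class name and the two-rack layout are invented.

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.net.DNSToSwitchMapping;

    /** Hypothetical resolver: even last octets go to rack 0, odd ones to rack 1. */
    public class TwoRackMapping implements DNSToSwitchMapping {
      @Override
      public List<String> resolve(List<String> names) {
        List<String> paths = new ArrayList<String>(names.size());
        for (String name : names) {
          int lastOctet = parseLastOctet(name);
          // The returned path names the rack only; the host is not part of it.
          paths.add(lastOctet % 2 == 0 ? "/dc1/rack0" : "/dc1/rack1");
        }
        return paths;               // same size as names; empty in, empty out
      }

      private int parseLastOctet(String address) {
        try {
          return Integer.parseInt(address.substring(address.lastIndexOf('.') + 1));
        } catch (NumberFormatException e) {
          return 0;                 // non-numeric names fall back to rack 0
        }
      }
    }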
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index a91f694ae3..1c93f41d62 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -45,8 +45,8 @@ public class NetworkTopology {
   public static final Log LOG = 
     LogFactory.getLog(NetworkTopology.class);
     
-  /* Inner Node represent a switch/router of a data center or rack.
-   * Different from a leave node, it has non-null children.
+  /** InnerNode represents a switch/router of a data center or rack.
+   * Different from a leaf node, it has non-null children.
    */
   private class InnerNode extends NodeBase {
     private ArrayList<Node> children=new ArrayList<Node>();
@@ -68,16 +68,16 @@ private class InnerNode extends NodeBase {
       super(name, location, parent, level);
     }
 
-    /** Get its children */
+    /** @return its children */
     Collection<Node> getChildren() {return children;}
 
-    /** Return the number of children this node has */
+    /** @return the number of children this node has */
     int getNumOfChildren() {
       return children.size();
     }
 
     /** Judge if this node represents a rack 
-     * Return true if it has no child or its children are not InnerNodes
+     * @return true if it has no child or its children are not InnerNodes
      */
     boolean isRack() {
       if (children.isEmpty()) {
@@ -225,7 +225,11 @@ boolean remove(Node n) {
       }
     } // end of remove
 
-    /** Given a node's string representation, return a reference to the node */
+    /** Given a node's string representation, return a reference to the node
+     * @param loc string location of the form /rack/node
+     * @return null if the node is not found or the child node is there but
+     * not an instance of {@link InnerNode}
+     */
     private Node getLoc(String loc) {
       if (loc == null || loc.length() == 0) return this;
@@ -246,7 +250,12 @@ private Node getLoc(String loc) {
     }
 
     /** get leafIndex leaf of this subtree
-     * if it is not in the excludedNode*/
+     * if it is not in the excludedNode
+     *
+     * @param leafIndex an indexed leaf of the node
+     * @param excludedNode an excluded node (can be null)
+     * @return the leaf at the given index, or null if no such leaf can be found
+     */
     private Node getLeaf(int leafIndex, Node excludedNode) {
       int count=0;
       // check if the excluded node a leaf
@@ -297,9 +306,14 @@ int getNumOfLeaves() {
       return numOfLeaves;
     }
   } // end of InnerNode
-  
-  InnerNode clusterMap = new InnerNode(InnerNode.ROOT); // the root
-  private int numOfRacks = 0;  // rack counter
+
+  /**
+   * the root cluster map
+   */
+  InnerNode clusterMap = new InnerNode(InnerNode.ROOT);
+  /** rack counter */
+  private int numOfRacks = 0;
+  /** the lock used to manage access */
   private ReadWriteLock netlock;
   
   public NetworkTopology() {
@@ -308,8 +322,7 @@ public NetworkTopology() {
   
   /** Add a leaf node
    * Update node counter & rack counter if necessary
-   * @param node
-   *          node to be added
+   * @param node node to be added; can be null
    * @exception IllegalArgumentException if add a node to a leave 
    *            or node to be added is not a leaf
    */
@@ -342,9 +355,8 @@ public void add(Node node) {
   }
   
   /** Remove a node
-   * Update node counter & rack counter if necessary
-   * @param node
-   *          node to be removed
+   * Update node counter and rack counter if necessary
+   * @param node node to be removed; can be null
    */
   public void remove(Node node) {
     if (node==null) return;
@@ -371,8 +383,7 @@
   
   /** Check if the tree contains node <i>node</i>
    * 
-   * @param node
-   *          a node
+   * @param node a node
    * @return true if node is already in the tree; false otherwise
    */
   public boolean contains(Node node) {
@@ -380,10 +391,11 @@
     netlock.readLock().lock();
     try {
       Node parent = node.getParent();
-      for(int level=node.getLevel(); parent!=null&&level>0;
-          parent=parent.getParent(), level--) {
-        if (parent == clusterMap)
+      for (int level = node.getLevel(); parent != null && level > 0;
+           parent = parent.getParent(), level--) {
+        if (parent == clusterMap) {
           return true;
+        }
       }
     } finally {
       netlock.readLock().unlock();
@@ -409,7 +421,7 @@ public Node getNode(String loc) {
     }
   }
 
-  /** Return the total number of racks */
+  /** @return the total number of racks */
  public int getNumOfRacks() {
     netlock.readLock().lock();
     try {
@@ -419,7 +431,7 @@ public int getNumOfRacks() {
     }
   }
 
-  /** Return the total number of nodes */
+  /** @return the total number of leaf nodes */
  public int getNumOfLeaves() {
     netlock.readLock().lock();
     try {
@@ -432,11 +444,11 @@ public int getNumOfLeaves() {
   /** Return the distance between two nodes
    * It is assumed that the distance from one node to its parent is 1
    * The distance between two nodes is calculated by summing up their distances
-   * to their closest common ancestor.
+   * to their closest common ancestor. 
    * @param node1 one node
    * @param node2 another node
-   * @return the distance between node1 and node2
-   * node1 or node2 do not belong to the cluster
+   * @return the distance between node1 and node2, which is zero if they are the same
+   * or {@link Integer#MAX_VALUE} if node1 or node2 do not belong to the cluster
    */
   public int getDistance(Node node1, Node node2) {
     if (node1 == node2) {
@@ -477,8 +489,8 @@ public int getDistance(Node node1, Node node2) {
   }
 
   /** Check if two nodes are on the same rack
-   * @param node1 one node
-   * @param node2 another node
+   * @param node1 one node (can be null)
+   * @param node2 another node (can be null)
    * @return true if node1 and node2 are on the same rack; false otherwise
    * @exception IllegalArgumentException when either node1 or node2 is null, or
    *            node1 or node2 do not belong to the cluster
@@ -622,6 +634,8 @@ static private void swap(Node[] nodes, int i, int j) {
    * If neither local node or local rack node is found, put a random replica
    * location at position 0.
    * It leaves the rest nodes untouched.
+   * @param reader the node that wishes to read a block from one of the nodes
+   * @param nodes the list of nodes containing data for the reader
    */
   public void pseudoSortByDistance( Node reader, Node[] nodes ) {
     int tempIndex = 0;
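For illustration only, not part of the patch: a short sketch exercising the NetworkTopology methods whose javadoc is updated above. The host names, ports and rack locations are invented; the expected distances follow the documented model in which each hop from a node to its parent counts as one.

    import org.apache.hadoop.net.NetworkTopology;
    import org.apache.hadoop.net.Node;
    import org.apache.hadoop.net.NodeBase;

    public class TopologySketch {
      public static void main(String[] args) {
        NetworkTopology topology = new NetworkTopology();
        Node host1 = new NodeBase("h1:50010", "/d1/rack1");
        Node host2 = new NodeBase("h2:50010", "/d1/rack1");
        Node host3 = new NodeBase("h3:50010", "/d1/rack2");

        // add() registers leaf nodes and updates the node and rack counters.
        topology.add(host1);
        topology.add(host2);
        topology.add(host3);

        System.out.println(topology.getNumOfRacks());            // 2
        System.out.println(topology.getNumOfLeaves());           // 3
        System.out.println(topology.contains(host2));            // true
        System.out.println(topology.isOnSameRack(host1, host2)); // true
        // Same rack: one hop up to the rack and one back down, so distance 2.
        System.out.println(topology.getDistance(host1, host2));  // 2
        // Different racks under the same data center: distance 4.
        System.out.println(topology.getDistance(host1, host3));  // 4
      }
    }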
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/Node.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/Node.java
index 57952c34ab..ac57ba4abc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/Node.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/Node.java
@@ -33,20 +33,31 @@
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Unstable
 public interface Node {
-  /** Return the string representation of this node's network location */
+  /** @return the string representation of this node's network location */
   public String getNetworkLocation();
-  /** Set the node's network location */
+
+  /** Set this node's network location
+   * @param location the location
+   */
   public void setNetworkLocation(String location);
-  /** Return this node's name */
+  /** @return this node's name */
   public String getName();
-  /** Return this node's parent */
+
+  /** @return this node's parent */
   public Node getParent();
-  /** Set this node's parent */
+
+  /** Set this node's parent
+   * @param parent the parent
+   */
   public void setParent(Node parent);
-  /** Return this node's level in the tree.
+
+  /** @return this node's level in the tree.
    * E.g. the root of a tree returns 0 and its children return 1
    */
   public int getLevel();
-  /** Set this node's level in the tree.*/
+
+  /** Set this node's level in the tree
+   * @param i the level
+   */
   public void setLevel(int i);
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NodeBase.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NodeBase.java
index 0df8b9b2d5..a8f278176a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NodeBase.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NodeBase.java
@@ -27,9 +27,12 @@
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Unstable
 public class NodeBase implements Node {
+  /** Path separator {@value} */
   public final static char PATH_SEPARATOR = '/';
+  /** Path separator as a string {@value} */
   public final static String PATH_SEPARATOR_STR = "/";
-  public final static String ROOT = ""; // string representation of root
+  /** string representation of root {@value} */
+  public final static String ROOT = "";
 
   protected String name; //host:port#
   protected String location; //string representation of this node's location
@@ -55,7 +58,7 @@ public NodeBase(String path) {
   }
 
   /** Construct a node from its name and its location
-   * @param name this node's name
+   * @param name this node's name (can be null, must not contain {@link #PATH_SEPARATOR})
    * @param location this node's location 
    */
   public NodeBase(String name, String location) {
@@ -63,7 +66,7 @@ public NodeBase(String name, String location) {
 
   /** Construct a node from its name and its location
-   * @param name this node's name
+   * @param name this node's name (can be null, must not contain {@link #PATH_SEPARATOR})
    * @param location this node's location
    * @param parent this node's parent node
    * @param level this node's level in the tree
@@ -74,7 +77,11 @@ public NodeBase(String name, String location, Node parent, int level) {
     this.level = level;
   }
 
-  /* set this node's name and location */
+  /**
+   * set this node's name and location
+   * @param name the (nullable) name, which cannot contain the {@link #PATH_SEPARATOR}
+   * @param location the location
+   */
   private void set(String name, String location) {
     if (name != null && name.contains(PATH_SEPARATOR_STR))
       throw new IllegalArgumentException(
@@ -83,27 +90,43 @@ private void set(String name, String location) {
     this.location = location;      
   }
 
-  /** Return this node's name */
+  /** @return this node's name */
+  @Override
   public String getName() { return name; }
 
-  /** Return this node's network location */
+  /** @return this node's network location */
+  @Override
   public String getNetworkLocation() { return location; }
 
-  /** Set this node's network location */
+  /** Set this node's network location
+   * @param location the location
+   */
+  @Override
   public void setNetworkLocation(String location) { this.location = location; }
 
-  /** Return this node's path */
+  /**
+   * Get the path of a node
+   * @param node a non-null node
+   * @return the path of a node
+   */
   public static String getPath(Node node) {
     return node.getNetworkLocation()+PATH_SEPARATOR_STR+node.getName();
   }
 
-  /** Return this node's string representation */
+  /** @return this node's path as its string representation */
+  @Override
   public String toString() {
     return getPath(this);
   }
 
-  /** Normalize a path */
-  static public String normalize(String path) {
+  /** Normalize a path by stripping off any trailing {@link #PATH_SEPARATOR}
+   * @param path path to normalize
+   * @return the normalised path
+   *  If path is null or empty {@link #ROOT} is returned
+   * @throws IllegalArgumentException if the first character of a non-empty path
+   *    is not {@link #PATH_SEPARATOR}
+   */
+  public static String normalize(String path) {
     if (path == null || path.length() == 0) return ROOT;
 
     if (path.charAt(0) != PATH_SEPARATOR) {
@@ -119,20 +142,28 @@ static public String normalize(String path) {
     return path;
   }
 
-  /** Return this node's parent */
+  /** @return this node's parent */
+  @Override
   public Node getParent() { return parent; }
 
-  /** Set this node's parent */
+  /** Set this node's parent
+   * @param parent the parent
+   */
+  @Override
   public void setParent(Node parent) { this.parent = parent; }
 
-  /** Return this node's level in the tree.
+  /** @return this node's level in the tree.
    * E.g. the root of a tree returns 0 and its children return 1
    */
+  @Override
   public int getLevel() { return level; }
 
-  /** Set this node's level in the tree */
+  /** Set this node's level in the tree
+   * @param level the level
+   */
+  @Override
   public void setLevel(int level) { this.level = level; }
 }
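For illustration only, not part of the patch: a small sketch of the NodeBase path helpers documented above. The name and location values are invented.

    import org.apache.hadoop.net.Node;
    import org.apache.hadoop.net.NodeBase;

    public class NodeBaseSketch {
      public static void main(String[] args) {
        Node node = new NodeBase("host1:50010", "/dc1/rack4");

        System.out.println(node.getName());             // host1:50010
        System.out.println(node.getNetworkLocation());  // /dc1/rack4
        // toString() is the node's path: location + separator + name.
        System.out.println(node);                       // /dc1/rack4/host1:50010
        System.out.println(NodeBase.getPath(node));     // /dc1/rack4/host1:50010

        // normalize() strips a trailing separator and maps null/empty to ROOT ("").
        System.out.println(NodeBase.normalize("/dc1/rack4/"));  // /dc1/rack4
        System.out.println(NodeBase.normalize(null).isEmpty()); // true
      }
    }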
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java
index e3fe4581bb..c8b8db6412 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java
@@ -32,7 +32,7 @@
 
 /**
  * This class implements the {@link DNSToSwitchMapping} interface using a 
- * script configured via net.topology.script.file.name .
+ * script configured via the {@link CommonConfigurationKeys#NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY}
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
@@ -42,50 +42,86 @@ public final class ScriptBasedMapping extends CachedDNSToSwitchMapping
   public ScriptBasedMapping() {
     super(new RawScriptBasedMapping());
   }
-
-  // script must accept at least this many args
+
+  /**
+   * Minimum number of arguments: {@value}
+   */
   static final int MIN_ALLOWABLE_ARGS = 1;
-  
+
+  /**
+   * Default number of arguments: {@value}
+   */
   static final int DEFAULT_ARG_COUNT = 
      CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_DEFAULT;
-  
+
+  /**
+   * key to the script filename {@value}
+   */
   static final String SCRIPT_FILENAME_KEY = 
      CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY ;
-  static final String SCRIPT_ARG_COUNT_KEY =
+  /**
+   * key to the argument count that the script supports
+   */
+  static final String SCRIPT_ARG_COUNT_KEY = 
      CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_KEY ;
-  
+
+  /**
+   * Create an instance from the given configuration
+   * @param conf configuration
+   */
   public ScriptBasedMapping(Configuration conf) {
     this();
     setConf(conf);
   }
-  
+
+  @Override
   public Configuration getConf() {
     return ((RawScriptBasedMapping)rawMapping).getConf();
   }
-  
+
+  @Override
   public void setConf(Configuration conf) {
     ((RawScriptBasedMapping)rawMapping).setConf(conf);
   }
-  
+
+  /**
+   * This is the uncached script mapping that is fed into the cache managed
+   * by the superclass {@link CachedDNSToSwitchMapping}
+   */
   private static final class RawScriptBasedMapping
-  implements DNSToSwitchMapping {
-  private String scriptName;
-  private Configuration conf;
-  private int maxArgs; //max hostnames per call of the script
-  private static Log LOG = 
-    LogFactory.getLog(ScriptBasedMapping.class);
-  public void setConf (Configuration conf) {
-    this.scriptName = conf.get(SCRIPT_FILENAME_KEY);
-    this.maxArgs = conf.getInt(SCRIPT_ARG_COUNT_KEY, DEFAULT_ARG_COUNT);
-    this.conf = conf;
-  }
-  public Configuration getConf () {
-    return conf;
-  }
-
-  public RawScriptBasedMapping() {}
-  
-  public List<String> resolve(List<String> names) {
+      implements DNSToSwitchMapping {
+    private String scriptName;
+    private Configuration conf;
+    private int maxArgs; //max hostnames per call of the script
+    private static Log LOG =
+        LogFactory.getLog(ScriptBasedMapping.class);
+
+    /**
+     * Set the configuration and extract the configuration parameters of interest
+     * @param conf the configuration
+     */
+    public void setConf (Configuration conf) {
+      this.scriptName = conf.get(SCRIPT_FILENAME_KEY);
+      this.maxArgs = conf.getInt(SCRIPT_ARG_COUNT_KEY, DEFAULT_ARG_COUNT);
+      this.conf = conf;
+    }
+
+    /**
+     * Get the configuration
+     * @return the configuration
+     */
+    public Configuration getConf () {
+      return conf;
+    }
+
+    /**
+     * Constructor. The mapping is not ready to use until
+     * {@link #setConf(Configuration)} has been called
+     */
+    public RawScriptBasedMapping() {}
+
+    @Override
+    public List<String> resolve(List<String> names) {
     List<String> m = new ArrayList<String>(names.size());
     
     if (names.isEmpty()) {
@@ -123,45 +159,53 @@ public List<String> resolve(List<String> names) {
       return m;
     }
 
-  private String runResolveCommand(List<String> args) {
-    int loopCount = 0;
-    if (args.size() == 0) {
-      return null;
-    }
-    StringBuilder allOutput = new StringBuilder();
-    int numProcessed = 0;
-    if (maxArgs < MIN_ALLOWABLE_ARGS) {
-      LOG.warn("Invalid value " + Integer.toString(maxArgs)
-          + " for " + SCRIPT_ARG_COUNT_KEY + "; must be >= "
-          + Integer.toString(MIN_ALLOWABLE_ARGS));
-      return null;
-    }
-
-    while (numProcessed != args.size()) {
-      int start = maxArgs * loopCount;
-      List<String> cmdList = new ArrayList<String>();
-      cmdList.add(scriptName);
-      for (numProcessed = start; numProcessed < (start + maxArgs) &&
-          numProcessed < args.size(); numProcessed++) {
-        cmdList.add(args.get(numProcessed));
-      }
-      File dir = null;
-      String userDir;
-      if ((userDir = System.getProperty("user.dir")) != null) {
-        dir = new File(userDir);
-      }
-      ShellCommandExecutor s = new ShellCommandExecutor(
-          cmdList.toArray(new String[0]), dir);
-      try {
-        s.execute();
-        allOutput.append(s.getOutput() + " ");
-      } catch (Exception e) {
-        LOG.warn("Exception: ", e);
+    /**
+     * Build and execute the resolution command. The command is
+     * executed in the directory specified by the system property
+     * "user.dir" if set; otherwise the current working directory is used
+     * @param args a list of arguments
+     * @return null if the number of arguments is out of range,
+     * or the output of the command.
+     */
+    private String runResolveCommand(List<String> args) {
+      int loopCount = 0;
+      if (args.size() == 0) {
        return null;
      }
+      StringBuilder allOutput = new StringBuilder();
+      int numProcessed = 0;
+      if (maxArgs < MIN_ALLOWABLE_ARGS) {
+        LOG.warn("Invalid value " + Integer.toString(maxArgs)
+            + " for " + SCRIPT_ARG_COUNT_KEY + "; must be >= "
+            + Integer.toString(MIN_ALLOWABLE_ARGS));
+        return null;
+      }
+
+      while (numProcessed != args.size()) {
+        int start = maxArgs * loopCount;
+        List<String> cmdList = new ArrayList<String>();
+        cmdList.add(scriptName);
+        for (numProcessed = start; numProcessed < (start + maxArgs) &&
+            numProcessed < args.size(); numProcessed++) {
+          cmdList.add(args.get(numProcessed));
+        }
+        File dir = null;
+        String userDir;
+        if ((userDir = System.getProperty("user.dir")) != null) {
+          dir = new File(userDir);
+        }
+        ShellCommandExecutor s = new ShellCommandExecutor(
+            cmdList.toArray(new String[0]), dir);
+        try {
+          s.execute();
+          allOutput.append(s.getOutput() + " ");
+        } catch (Exception e) {
+          LOG.warn("Exception: ", e);
+          return null;
+        }
+        loopCount++;
+      }
+      return allOutput.toString();
     }
-    return allOutput.toString();
-  }
   }
 }
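For illustration only, not part of the patch: a sketch of wiring up the script mapping through the configuration keys referenced above. The script path is invented, and the script itself (not shown) must print one rack path for every host name it is handed, so the printed result is only indicative.

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;
    import org.apache.hadoop.net.ScriptBasedMapping;

    public class ScriptMappingSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Point the mapping at a (hypothetical) topology script.
        conf.set(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
            "/etc/hadoop/topology.sh");
        // Optional: cap how many host names are passed to the script per invocation.
        conf.setInt(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_KEY, 20);

        ScriptBasedMapping mapping = new ScriptBasedMapping(conf);
        List<String> racks = mapping.resolve(Arrays.asList("10.0.0.1", "10.0.0.2"));
        System.out.println(racks);   // e.g. [/dc1/rack1, /dc1/rack2], as printed by the script
      }
    }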