diff --git a/CHANGES.txt b/CHANGES.txt
index 40d6345e19..a3421b4f3f 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -68,6 +68,9 @@ Trunk (unreleased changes)
     HADOOP-5698. Change org.apache.hadoop.examples.MultiFileWordCount to
     use new mapreduce api. (Amareshwari Sriramadasu via sharad)
 
+    HADOOP-5913. Provide ability to an administrator to stop and start
+    job queues. (Rahul Kumar Singh and Hemanth Yamijala via yhemanth)
+
   NEW FEATURES
 
     HADOOP-4268. Change fsck to use ClientProtocol methods so that the
@@ -149,6 +152,9 @@ Trunk (unreleased changes)
     HADOOP-5170. Allows jobs to set max maps/reduces per-node and per-cluster.
     (Matei Zaharia via ddas)
 
+    HADOOP-5897. Add name-node metrics to capture java heap usage.
+    (Suresh Srinivas via shv)
+
   IMPROVEMENTS
 
     HADOOP-4565. Added CombineFileInputFormat to use data locality information
@@ -444,6 +450,12 @@ Trunk (unreleased changes)
     HADOOP-5938. Change org.apache.hadoop.mapred.jobcontrol to use new
     api. (Amareshwari Sriramadasu via sharad)
 
+    HADOOP-2141. Improves the speculative execution heuristic. The heuristic
+    is currently based on the progress-rates of tasks and the expected time
+    to complete. Also, statistics about trackers are collected, and speculative
+    tasks are not given to the ones deduced to be slow.
+    (Andy Konwinski and ddas)
+
   OPTIMIZATIONS
 
     HADOOP-5595. NameNode does not need to run a replicator to choose a
@@ -820,6 +832,9 @@ Trunk (unreleased changes)
     LD_LIBRARY_PATH and other environment variables.
     (Sreekanth Ramakrishnan via yhemanth)
 
+    HADOOP-4041. IsolationRunner does not work as documented.
+    (Philip Zeyliger via tomwhite)
+
 Release 0.20.1 - Unreleased
 
   INCOMPATIBLE CHANGES
@@ -948,6 +963,9 @@ Release 0.20.1 - Unreleased
     (usually HDFS) is started at nearly the same time as the JobTracker.
     (Amar Kamat via ddas)
 
+    HADOOP-5920. Fixes a testcase failure for TestJobHistory.
+    (Amar Kamat via ddas)
+
 Release 0.20.0 - 2009-04-15
 
   INCOMPATIBLE CHANGES
@@ -2982,6 +3000,10 @@ Release 0.18.4 - Unreleased
     HADOOP-5644. Namenode is stuck in safe mode.
     (suresh Srinivas via hairong)
 
+    HADOOP-6017. Lease Manager in NameNode does not handle certain characters
+    in filenames. This results in fatal errors in Secondary NameNode and while
+    restarting NameNode. (Tsz Wo (Nicholas), SZE via rangadi)
+
 Release 0.18.3 - 2009-01-27
 
   IMPROVEMENTS
diff --git a/src/java/org/apache/hadoop/fs/LocalDirAllocator.java b/src/java/org/apache/hadoop/fs/LocalDirAllocator.java
index 5d04d280da..1aa4663b15 100644
--- a/src/java/org/apache/hadoop/fs/LocalDirAllocator.java
+++ b/src/java/org/apache/hadoop/fs/LocalDirAllocator.java
@@ -33,7 +33,7 @@
  * files. The way it works is that it is kept track what disk was last
  * allocated for a file write. For the current request, the next disk from
  * the set of disks would be allocated if the free space on the disk is
- * sufficient enough to accomodate the file that is being considered for
+ * sufficient enough to accommodate the file that is being considered for
  * creation. If the space requirements cannot be met, the next disk in order
  * would be tried and so on till a disk is found with sufficient capacity.
  * Once a disk with sufficient space is identified, a check is done to make
@@ -69,6 +69,9 @@ public class LocalDirAllocator {
     new TreeMap<String, AllocatorPerContext>();
   private String contextCfgItemName;
 
+  /** Used when size of file to be allocated is unknown. */
+  public static final int SIZE_UNKNOWN = -1;
+
   /**Create an allocator object
    * @param contextCfgItemName
    */
@@ -105,10 +108,11 @@ private AllocatorPerContext obtainContext(String contextCfgItemName) {
    */
   public Path getLocalPathForWrite(String pathStr,
       Configuration conf) throws IOException {
-    return getLocalPathForWrite(pathStr, -1, conf);
+    return getLocalPathForWrite(pathStr, SIZE_UNKNOWN, conf);
   }
 
-  /** Get a path from the local FS. Pass size as -1 if not known apriori. We
+  /** Get a path from the local FS. Pass size as
+   *  SIZE_UNKNOWN if not known apriori. We
    *  round-robin over the set of disks (via the configured dirs) and return
    *  the first complete path which has enough space
    *  @param pathStr the requested path (this will be created on the first
@@ -274,7 +278,7 @@ int getCurrentDirectoryIndex() {
      */
     public synchronized Path getLocalPathForWrite(String path,
         Configuration conf) throws IOException {
-      return getLocalPathForWrite(path, -1, conf);
+      return getLocalPathForWrite(path, SIZE_UNKNOWN, conf);
     }
 
     /** Get a path from the local FS. If size is known, we go
@@ -296,7 +300,7 @@ public synchronized Path getLocalPathForWrite(String pathStr, long size,
       }
       Path returnPath = null;
 
-      if(size == -1) {  //do roulette selection: pick dir with probability
+      if(size == SIZE_UNKNOWN) {  //do roulette selection: pick dir with probability
                     //proportional to available size
         long[] availableOnDisk = new long[dirDF.length];
         long totalAvailable = 0;
@@ -344,7 +348,8 @@ public synchronized Path getLocalPathForWrite(String pathStr, long size,
         "directory for " + pathStr);
   }
 
-  /** Creates a file on the local FS. Pass size as -1 if not known apriori. We
+  /** Creates a file on the local FS. Pass size as
+   *  {@link LocalDirAllocator#SIZE_UNKNOWN} if not known apriori. We
    *  round-robin over the set of disks (via the configured dirs) and return
    *  a file on the first path which has enough space. The file is guaranteed
    *  to go away when the JVM exits.
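
Below is a minimal caller-side sketch of the new constant in use. It is illustrative only: the "mapred.local.dir" key, the directory values, and the scratch-file name are assumptions, not part of the patch; LocalDirAllocator and getLocalPathForWrite are the APIs the hunks above touch.

// Illustrative sketch: requesting a local scratch path when the eventual
// file size is not known up front. Config key and paths are assumed values.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;

public class SizeUnknownExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("mapred.local.dir", "/tmp/local1,/tmp/local2");
    LocalDirAllocator allocator = new LocalDirAllocator("mapred.local.dir");

    // With SIZE_UNKNOWN the allocator cannot check capacity up front, so it
    // falls back to roulette selection: each configured directory is picked
    // with probability proportional to its available space.
    Path scratch = allocator.getLocalPathForWrite(
        "scratch/part-00000.tmp", LocalDirAllocator.SIZE_UNKNOWN, conf);
    System.out.println("allocated under: " + scratch);
  }
}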
diff --git a/src/java/org/apache/hadoop/util/StringUtils.java b/src/java/org/apache/hadoop/util/StringUtils.java
index c327400546..2c1a5fe37b 100644
--- a/src/java/org/apache/hadoop/util/StringUtils.java
+++ b/src/java/org/apache/hadoop/util/StringUtils.java
@@ -677,4 +677,24 @@ public static String byteDesc(long len) {
   public static synchronized String limitDecimalTo2(double d) {
     return decimalFormat.format(d);
   }
+
+  /**
+   * Concatenates strings, using a separator.
+   *
+   * @param separator Separator to join with.
+   * @param strings Strings to join.
+   */
+  public static String join(CharSequence separator, Iterable<String> strings) {
+    StringBuffer sb = new StringBuffer();
+    boolean first = true;
+    for (String s : strings) {
+      if (first) {
+        first = false;
+      } else {
+        sb.append(separator);
+      }
+      sb.append(s);
+    }
+    return sb.toString();
+  }
 }
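
A quick usage sketch for the new helper (illustrative, not from the patch). Design note: since the buffer never escapes the method, an unsynchronized StringBuilder would behave identically; StringBuffer simply matches the vintage of the surrounding code.

// Illustrative use of StringUtils.join(): separators appear between
// elements only, and an empty iterable yields the empty string.
import java.util.Arrays;
import java.util.Collections;
import org.apache.hadoop.util.StringUtils;

public class JoinExample {
  public static void main(String[] args) {
    System.out.println(StringUtils.join(",", Arrays.asList("alpha", "beta", "gamma")));
    // prints: alpha,beta,gamma
    System.out.println("[" + StringUtils.join(",", Collections.<String>emptyList()) + "]");
    // prints: []
  }
}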
diff --git a/src/test/core/org/apache/hadoop/util/TestStringUtils.java b/src/test/core/org/apache/hadoop/util/TestStringUtils.java
index e68609ae2f..7aa6a7a333 100644
--- a/src/test/core/org/apache/hadoop/util/TestStringUtils.java
+++ b/src/test/core/org/apache/hadoop/util/TestStringUtils.java
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.util;
 
+import java.util.ArrayList;
+import java.util.List;
+
 import junit.framework.TestCase;
 
 public class TestStringUtils extends TestCase {
@@ -118,4 +121,15 @@ public void testTraditionalBinaryPrefix() throws Exception {
     assertEquals(-1259520L, StringUtils.TraditionalBinaryPrefix.string2long("-1230k"));
     assertEquals(956703965184L, StringUtils.TraditionalBinaryPrefix.string2long("891g"));
   }
+
+  public void testJoin() {
+    List<String> s = new ArrayList<String>();
+    s.add("a");
+    s.add("b");
+    s.add("c");
+    assertEquals("", StringUtils.join(":", s.subList(0, 0)));
+    assertEquals("a", StringUtils.join(":", s.subList(0, 1)));
+    assertEquals("a:b", StringUtils.join(":", s.subList(0, 2)));
+    assertEquals("a:b:c", StringUtils.join(":", s.subList(0, 3)));
+  }
 }
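
A closing aside: this helper predates java.lang.String#join (added in Java 8), which covers the same cases on newer JDKs. A comparison sketch, editorial rather than part of the patch:

// Java 8+ equivalent of the cases exercised in testJoin() above.
import java.util.Arrays;
import java.util.List;

public class JoinComparison {
  public static void main(String[] args) {
    List<String> s = Arrays.asList("a", "b", "c");
    System.out.println(String.join(":", s.subList(0, 0)));  // prints "" (empty line)
    System.out.println(String.join(":", s.subList(0, 3)));  // prints a:b:c
  }
}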