+ List<SampleInterface> classes =
+ conf.getInstances("no.such.property", SampleInterface.class);
+ assertTrue(classes.isEmpty());
+
+ conf.set("empty.property", "");
+ classes = conf.getInstances("empty.property", SampleInterface.class);
+ assertTrue(classes.isEmpty());
+
+ conf.setStrings("some.classes",
+ SampleClass.class.getName(), AnotherClass.class.getName());
+ classes = conf.getInstances("some.classes", SampleInterface.class);
+ assertEquals(2, classes.size());
+
+ try {
+ conf.setStrings("some.classes",
+ SampleClass.class.getName(), AnotherClass.class.getName(),
+ String.class.getName());
+ conf.getInstances("some.classes", SampleInterface.class);
+ fail("java.lang.String does not implement SampleInterface");
+ } catch (RuntimeException e) {}
+
+ try {
+ conf.setStrings("some.classes",
+ SampleClass.class.getName(), AnotherClass.class.getName(),
+ "no.such.Class");
+ conf.getInstances("some.classes", SampleInterface.class);
+ fail("no.such.Class does not exist");
+ } catch (RuntimeException e) {}
+ }
+}
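The assertions above pin down the Configuration.getInstances contract: an unset or empty property yields an empty list, every listed class must implement the requested interface, and a missing or incompatible class surfaces as a RuntimeException. A minimal caller-side sketch, with a hypothetical property name and plugin interface:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;

    Configuration conf = new Configuration();
    conf.setStrings("my.plugins", FirstPlugin.class.getName());
    // Each listed class needs a no-argument constructor and must implement
    // PluginInterface, otherwise getInstances throws a RuntimeException.
    List<PluginInterface> plugins =
        conf.getInstances("my.plugins", PluginInterface.class);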
diff --git a/src/test/core/org/apache/hadoop/conf/empty-configuration.xml b/src/test/core/org/apache/hadoop/conf/empty-configuration.xml
new file mode 100644
index 0000000000..a2086fa683
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/conf/empty-configuration.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+</configuration>
diff --git a/src/test/core/org/apache/hadoop/filecache/TestDistributedCache.java b/src/test/core/org/apache/hadoop/filecache/TestDistributedCache.java
new file mode 100644
index 0000000000..2da7f0bc14
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/filecache/TestDistributedCache.java
@@ -0,0 +1,77 @@
+package org.apache.hadoop.filecache;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+import junit.framework.TestCase;
+
+public class TestDistributedCache extends TestCase {
+
+ static final URI LOCAL_FS = URI.create("file:///");
+ private static String TEST_CACHE_BASE_DIR =
+ new Path(System.getProperty("test.build.data","/tmp/cachebasedir"))
+ .toString().replace(' ', '+');
+ private static String TEST_ROOT_DIR =
+ System.getProperty("test.build.data", "/tmp/distributedcache");
+ private static final int TEST_FILE_SIZE = 4 * 1024; // 4K
+ private static final int LOCAL_CACHE_LIMIT = 5 * 1024; //5K
+ private Configuration conf;
+ private Path firstCacheFile;
+ private Path secondCacheFile;
+ private FileSystem localfs;
+
+ /**
+ * @see TestCase#setUp()
+ */
+ @Override
+ protected void setUp() throws IOException {
+ conf = new Configuration();
+ conf.setLong("local.cache.size", LOCAL_CACHE_LIMIT);
+ localfs = FileSystem.get(LOCAL_FS, conf);
+ firstCacheFile = new Path(TEST_ROOT_DIR+"/firstcachefile");
+ secondCacheFile = new Path(TEST_ROOT_DIR+"/secondcachefile");
+ createTempFile(localfs, firstCacheFile);
+ createTempFile(localfs, secondCacheFile);
+ }
+
+ /** test delete cache */
+ public void testDeleteCache() throws Exception {
+ DistributedCache.getLocalCache(firstCacheFile.toUri(), conf, new Path(TEST_CACHE_BASE_DIR),
+ false, System.currentTimeMillis(), new Path(TEST_ROOT_DIR));
+ DistributedCache.releaseCache(firstCacheFile.toUri(), conf);
+ // The code above localizes a 4K file and then releases it, which makes that
+ // cache entry eligible for deletion once the size limit is exceeded. The code
+ // below localizes another file, which is expected to sweep away the first entry.
+ DistributedCache.getLocalCache(secondCacheFile.toUri(), conf, new Path(TEST_CACHE_BASE_DIR),
+ false, System.currentTimeMillis(), new Path(TEST_ROOT_DIR));
+ FileStatus[] dirStatuses = localfs.listStatus(new Path(TEST_CACHE_BASE_DIR));
+ assertTrue("DistributedCache failed deleting old cache when the cache store is full.",
+ dirStatuses.length > 1);
+ }
+
+ private void createTempFile(FileSystem fs, Path p) throws IOException {
+ FSDataOutputStream out = fs.create(p);
+ byte[] toWrite = new byte[TEST_FILE_SIZE];
+ new Random().nextBytes(toWrite);
+ out.write(toWrite);
+ out.close();
+ FileSystem.LOG.info("created: " + p + ", size=" + TEST_FILE_SIZE);
+ }
+
+ /**
+ * @see TestCase#tearDown()
+ */
+ @Override
+ protected void tearDown() throws IOException {
+ localfs.delete(firstCacheFile, true);
+ localfs.delete(secondCacheFile, true);
+ localfs.close();
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/fs/FileSystemContractBaseTest.java b/src/test/core/org/apache/hadoop/fs/FileSystemContractBaseTest.java
new file mode 100644
index 0000000000..8bdeb3bfd7
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -0,0 +1,471 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+/**
+ *
+ * A collection of tests for the contract of the {@link FileSystem}.
+ * This test should be used for general-purpose implementations of
+ * {@link FileSystem}, that is, implementations that support the full
+ * {@link FileSystem} API.
+ *
+ *
+ * To test a given {@link FileSystem} implementation create a subclass of this
+ * test and override {@link #setUp()} to initialize the fs
+ * {@link FileSystem} instance variable.
+ *
+ */
+public abstract class FileSystemContractBaseTest extends TestCase {
+
+ protected FileSystem fs;
+ private byte[] data = new byte[getBlockSize() * 2]; // two blocks of data
+ {
+ for (int i = 0; i < data.length; i++) {
+ data[i] = (byte) (i % 10);
+ }
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ fs.delete(path("/test"), true);
+ }
+
+ protected int getBlockSize() {
+ return 1024;
+ }
+
+ protected String getDefaultWorkingDirectory() {
+ return "/user/" + System.getProperty("user.name");
+ }
+
+ protected boolean renameSupported() {
+ return true;
+ }
+
+ public void testFsStatus() throws Exception {
+ FsStatus fsStatus = fs.getStatus();
+ assertNotNull(fsStatus);
+ //used, free and capacity are non-negative longs
+ assertTrue(fsStatus.getUsed() >= 0);
+ assertTrue(fsStatus.getRemaining() >= 0);
+ assertTrue(fsStatus.getCapacity() >= 0);
+ }
+
+ public void testWorkingDirectory() throws Exception {
+
+ Path workDir = path(getDefaultWorkingDirectory());
+ assertEquals(workDir, fs.getWorkingDirectory());
+
+ fs.setWorkingDirectory(path("."));
+ assertEquals(workDir, fs.getWorkingDirectory());
+
+ fs.setWorkingDirectory(path(".."));
+ assertEquals(workDir.getParent(), fs.getWorkingDirectory());
+
+ Path relativeDir = path("hadoop");
+ fs.setWorkingDirectory(relativeDir);
+ assertEquals(relativeDir, fs.getWorkingDirectory());
+
+ Path absoluteDir = path("/test/hadoop");
+ fs.setWorkingDirectory(absoluteDir);
+ assertEquals(absoluteDir, fs.getWorkingDirectory());
+
+ }
+
+ public void testMkdirs() throws Exception {
+ Path testDir = path("/test/hadoop");
+ assertFalse(fs.exists(testDir));
+ assertFalse(fs.isFile(testDir));
+
+ assertTrue(fs.mkdirs(testDir));
+
+ assertTrue(fs.exists(testDir));
+ assertFalse(fs.isFile(testDir));
+
+ assertTrue(fs.mkdirs(testDir));
+
+ assertTrue(fs.exists(testDir));
+ assertFalse(fs.isFile(testDir));
+
+ Path parentDir = testDir.getParent();
+ assertTrue(fs.exists(parentDir));
+ assertFalse(fs.isFile(parentDir));
+
+ Path grandparentDir = parentDir.getParent();
+ assertTrue(fs.exists(grandparentDir));
+ assertFalse(fs.isFile(grandparentDir));
+
+ }
+
+ public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
+ Path testDir = path("/test/hadoop");
+ assertFalse(fs.exists(testDir));
+ assertTrue(fs.mkdirs(testDir));
+ assertTrue(fs.exists(testDir));
+
+ createFile(path("/test/hadoop/file"));
+
+ Path testSubDir = path("/test/hadoop/file/subdir");
+ try {
+ fs.mkdirs(testSubDir);
+ fail("Should throw IOException.");
+ } catch (IOException e) {
+ // expected
+ }
+ assertFalse(fs.exists(testSubDir));
+
+ Path testDeepSubDir = path("/test/hadoop/file/deep/sub/dir");
+ try {
+ fs.mkdirs(testDeepSubDir);
+ fail("Should throw IOException.");
+ } catch (IOException e) {
+ // expected
+ }
+ assertFalse(fs.exists(testDeepSubDir));
+
+ }
+
+ public void testGetFileStatusThrowsExceptionForNonExistentFile()
+ throws Exception {
+ try {
+ fs.getFileStatus(path("/test/hadoop/file"));
+ fail("Should throw FileNotFoundException");
+ } catch (FileNotFoundException e) {
+ // expected
+ }
+ }
+
+ public void testListStatusReturnsNullForNonExistentFile() throws Exception {
+ assertNull(fs.listStatus(path("/test/hadoop/file")));
+ }
+
+ public void testListStatus() throws Exception {
+ Path[] testDirs = { path("/test/hadoop/a"),
+ path("/test/hadoop/b"),
+ path("/test/hadoop/c/1"), };
+ assertFalse(fs.exists(testDirs[0]));
+
+ for (Path path : testDirs) {
+ assertTrue(fs.mkdirs(path));
+ }
+
+ FileStatus[] paths = fs.listStatus(path("/test"));
+ assertEquals(1, paths.length);
+ assertEquals(path("/test/hadoop"), paths[0].getPath());
+
+ paths = fs.listStatus(path("/test/hadoop"));
+ assertEquals(3, paths.length);
+ assertEquals(path("/test/hadoop/a"), paths[0].getPath());
+ assertEquals(path("/test/hadoop/b"), paths[1].getPath());
+ assertEquals(path("/test/hadoop/c"), paths[2].getPath());
+
+ paths = fs.listStatus(path("/test/hadoop/a"));
+ assertEquals(0, paths.length);
+ }
+
+ public void testWriteReadAndDeleteEmptyFile() throws Exception {
+ writeReadAndDelete(0);
+ }
+
+ public void testWriteReadAndDeleteHalfABlock() throws Exception {
+ writeReadAndDelete(getBlockSize() / 2);
+ }
+
+ public void testWriteReadAndDeleteOneBlock() throws Exception {
+ writeReadAndDelete(getBlockSize());
+ }
+
+ public void testWriteReadAndDeleteOneAndAHalfBlocks() throws Exception {
+ writeReadAndDelete(getBlockSize() + (getBlockSize() / 2));
+ }
+
+ public void testWriteReadAndDeleteTwoBlocks() throws Exception {
+ writeReadAndDelete(getBlockSize() * 2);
+ }
+
+ private void writeReadAndDelete(int len) throws IOException {
+ Path path = path("/test/hadoop/file");
+
+ fs.mkdirs(path.getParent());
+
+ FSDataOutputStream out = fs.create(path, false,
+ fs.getConf().getInt("io.file.buffer.size", 4096),
+ (short) 1, getBlockSize());
+ out.write(data, 0, len);
+ out.close();
+
+ assertTrue("Exists", fs.exists(path));
+ assertEquals("Length", len, fs.getFileStatus(path).getLen());
+
+ FSDataInputStream in = fs.open(path);
+ byte[] buf = new byte[len];
+ in.readFully(0, buf);
+ in.close();
+
+ assertEquals(len, buf.length);
+ for (int i = 0; i < buf.length; i++) {
+ assertEquals("Position " + i, data[i], buf[i]);
+ }
+
+ assertTrue("Deleted", fs.delete(path, false));
+
+ assertFalse("No longer exists", fs.exists(path));
+
+ }
+
+ public void testOverwrite() throws IOException {
+ Path path = path("/test/hadoop/file");
+
+ fs.mkdirs(path.getParent());
+
+ createFile(path);
+
+ assertTrue("Exists", fs.exists(path));
+ assertEquals("Length", data.length, fs.getFileStatus(path).getLen());
+
+ try {
+ fs.create(path, false);
+ fail("Should throw IOException.");
+ } catch (IOException e) {
+ // Expected
+ }
+
+ FSDataOutputStream out = fs.create(path, true);
+ out.write(data, 0, data.length);
+ out.close();
+
+ assertTrue("Exists", fs.exists(path));
+ assertEquals("Length", data.length, fs.getFileStatus(path).getLen());
+
+ }
+
+ public void testWriteInNonExistentDirectory() throws IOException {
+ Path path = path("/test/hadoop/file");
+ assertFalse("Parent doesn't exist", fs.exists(path.getParent()));
+ createFile(path);
+
+ assertTrue("Exists", fs.exists(path));
+ assertEquals("Length", data.length, fs.getFileStatus(path).getLen());
+ assertTrue("Parent exists", fs.exists(path.getParent()));
+ }
+
+ public void testDeleteNonExistentFile() throws IOException {
+ Path path = path("/test/hadoop/file");
+ assertFalse("Doesn't exist", fs.exists(path));
+ assertFalse("No deletion", fs.delete(path, true));
+ }
+
+ public void testDeleteRecursively() throws IOException {
+ Path dir = path("/test/hadoop");
+ Path file = path("/test/hadoop/file");
+ Path subdir = path("/test/hadoop/subdir");
+
+ createFile(file);
+ assertTrue("Created subdir", fs.mkdirs(subdir));
+
+ assertTrue("File exists", fs.exists(file));
+ assertTrue("Dir exists", fs.exists(dir));
+ assertTrue("Subdir exists", fs.exists(subdir));
+
+ try {
+ fs.delete(dir, false);
+ fail("Should throw IOException.");
+ } catch (IOException e) {
+ // expected
+ }
+ assertTrue("File still exists", fs.exists(file));
+ assertTrue("Dir still exists", fs.exists(dir));
+ assertTrue("Subdir still exists", fs.exists(subdir));
+
+ assertTrue("Deleted", fs.delete(dir, true));
+ assertFalse("File doesn't exist", fs.exists(file));
+ assertFalse("Dir doesn't exist", fs.exists(dir));
+ assertFalse("Subdir doesn't exist", fs.exists(subdir));
+ }
+
+ public void testDeleteEmptyDirectory() throws IOException {
+ Path dir = path("/test/hadoop");
+ assertTrue(fs.mkdirs(dir));
+ assertTrue("Dir exists", fs.exists(dir));
+ assertTrue("Deleted", fs.delete(dir, false));
+ assertFalse("Dir doesn't exist", fs.exists(dir));
+ }
+
+ public void testRenameNonExistentPath() throws Exception {
+ if (!renameSupported()) return;
+
+ Path src = path("/test/hadoop/path");
+ Path dst = path("/test/new/newpath");
+ rename(src, dst, false, false, false);
+ }
+
+ public void testRenameFileMoveToNonExistentDirectory() throws Exception {
+ if (!renameSupported()) return;
+
+ Path src = path("/test/hadoop/file");
+ createFile(src);
+ Path dst = path("/test/new/newfile");
+ rename(src, dst, false, true, false);
+ }
+
+ public void testRenameFileMoveToExistingDirectory() throws Exception {
+ if (!renameSupported()) return;
+
+ Path src = path("/test/hadoop/file");
+ createFile(src);
+ Path dst = path("/test/new/newfile");
+ fs.mkdirs(dst.getParent());
+ rename(src, dst, true, false, true);
+ }
+
+ public void testRenameFileAsExistingFile() throws Exception {
+ if (!renameSupported()) return;
+
+ Path src = path("/test/hadoop/file");
+ createFile(src);
+ Path dst = path("/test/new/newfile");
+ createFile(dst);
+ rename(src, dst, false, true, true);
+ }
+
+ public void testRenameFileAsExistingDirectory() throws Exception {
+ if (!renameSupported()) return;
+
+ Path src = path("/test/hadoop/file");
+ createFile(src);
+ Path dst = path("/test/new/newdir");
+ fs.mkdirs(dst);
+ rename(src, dst, true, false, true);
+ assertTrue("Destination changed",
+ fs.exists(path("/test/new/newdir/file")));
+ }
+
+ public void testRenameDirectoryMoveToNonExistentDirectory()
+ throws Exception {
+ if (!renameSupported()) return;
+
+ Path src = path("/test/hadoop/dir");
+ fs.mkdirs(src);
+ Path dst = path("/test/new/newdir");
+ rename(src, dst, false, true, false);
+ }
+
+ public void testRenameDirectoryMoveToExistingDirectory() throws Exception {
+ if (!renameSupported()) return;
+
+ Path src = path("/test/hadoop/dir");
+ fs.mkdirs(src);
+ createFile(path("/test/hadoop/dir/file1"));
+ createFile(path("/test/hadoop/dir/subdir/file2"));
+
+ Path dst = path("/test/new/newdir");
+ fs.mkdirs(dst.getParent());
+ rename(src, dst, true, false, true);
+
+ assertFalse("Nested file1 exists",
+ fs.exists(path("/test/hadoop/dir/file1")));
+ assertFalse("Nested file2 exists",
+ fs.exists(path("/test/hadoop/dir/subdir/file2")));
+ assertTrue("Renamed nested file1 exists",
+ fs.exists(path("/test/new/newdir/file1")));
+ assertTrue("Renamed nested exists",
+ fs.exists(path("/test/new/newdir/subdir/file2")));
+ }
+
+ public void testRenameDirectoryAsExistingFile() throws Exception {
+ if (!renameSupported()) return;
+
+ Path src = path("/test/hadoop/dir");
+ fs.mkdirs(src);
+ Path dst = path("/test/new/newfile");
+ createFile(dst);
+ rename(src, dst, false, true, true);
+ }
+
+ public void testRenameDirectoryAsExistingDirectory() throws Exception {
+ if (!renameSupported()) return;
+
+ Path src = path("/test/hadoop/dir");
+ fs.mkdirs(src);
+ createFile(path("/test/hadoop/dir/file1"));
+ createFile(path("/test/hadoop/dir/subdir/file2"));
+
+ Path dst = path("/test/new/newdir");
+ fs.mkdirs(dst);
+ rename(src, dst, true, false, true);
+ assertTrue("Destination changed",
+ fs.exists(path("/test/new/newdir/dir")));
+ assertFalse("Nested file1 exists",
+ fs.exists(path("/test/hadoop/dir/file1")));
+ assertFalse("Nested file2 exists",
+ fs.exists(path("/test/hadoop/dir/subdir/file2")));
+ assertTrue("Renamed nested file1 exists",
+ fs.exists(path("/test/new/newdir/dir/file1")));
+ assertTrue("Renamed nested exists",
+ fs.exists(path("/test/new/newdir/dir/subdir/file2")));
+ }
+
+ public void testInputStreamClosedTwice() throws IOException {
+ //HADOOP-4760 according to Closeable#close() closing already-closed
+ //streams should have no effect.
+ Path src = path("/test/hadoop/file");
+ createFile(src);
+ FSDataInputStream in = fs.open(src);
+ in.close();
+ in.close();
+ }
+
+ public void testOutputStreamClosedTwice() throws IOException {
+ //HADOOP-4760 according to Closeable#close() closing already-closed
+ //streams should have no effect.
+ Path src = path("/test/hadoop/file");
+ FSDataOutputStream out = fs.create(src);
+ out.writeChar('H'); //write some data
+ out.close();
+ out.close();
+ }
+
+ protected Path path(String pathString) {
+ return new Path(pathString).makeQualified(fs);
+ }
+
+ protected void createFile(Path path) throws IOException {
+ FSDataOutputStream out = fs.create(path);
+ out.write(data, 0, data.length);
+ out.close();
+ }
+
+ private void rename(Path src, Path dst, boolean renameSucceeded,
+ boolean srcExists, boolean dstExists) throws IOException {
+ assertEquals("Rename result", renameSucceeded, fs.rename(src, dst));
+ assertEquals("Source exists", srcExists, fs.exists(src));
+ assertEquals("Destination exists", dstExists, fs.exists(dst));
+ }
+}
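As the class javadoc explains, a concrete filesystem opts into this contract suite by subclassing it and assigning the protected fs field in setUp(). A minimal sketch of such a subclass, using the local filesystem purely as an illustration (this class and its name are not part of the patch):

    package org.apache.hadoop.fs;

    import org.apache.hadoop.conf.Configuration;

    public class LocalFSContractTest extends FileSystemContractBaseTest {
      @Override
      protected void setUp() throws Exception {
        // Every test case in the base class runs against whatever fs points to.
        fs = FileSystem.getLocal(new Configuration());
      }
    }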
diff --git a/src/test/core/org/apache/hadoop/fs/TestChecksumFileSystem.java b/src/test/core/org/apache/hadoop/fs/TestChecksumFileSystem.java
new file mode 100644
index 0000000000..c55fc3ae41
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/TestChecksumFileSystem.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import java.net.URI;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.conf.Configuration;
+import junit.framework.TestCase;
+
+public class TestChecksumFileSystem extends TestCase {
+ public void testgetChecksumLength() throws Exception {
+ assertEquals(8, ChecksumFileSystem.getChecksumLength(0L, 512));
+ assertEquals(12, ChecksumFileSystem.getChecksumLength(1L, 512));
+ assertEquals(12, ChecksumFileSystem.getChecksumLength(512L, 512));
+ assertEquals(16, ChecksumFileSystem.getChecksumLength(513L, 512));
+ assertEquals(16, ChecksumFileSystem.getChecksumLength(1023L, 512));
+ assertEquals(16, ChecksumFileSystem.getChecksumLength(1024L, 512));
+ assertEquals(408, ChecksumFileSystem.getChecksumLength(100L, 1));
+ assertEquals(4000000000008L,
+ ChecksumFileSystem.getChecksumLength(10000000000000L, 10));
+ }
+
+ public void testVerifyChecksum() throws Exception {
+ String TEST_ROOT_DIR
+ = System.getProperty("test.build.data","build/test/data/work-dir/localfs");
+
+ Configuration conf = new Configuration();
+ LocalFileSystem localFs = FileSystem.getLocal(conf);
+ Path testPath = new Path(TEST_ROOT_DIR, "testPath");
+ Path testPath11 = new Path(TEST_ROOT_DIR, "testPath11");
+ FSDataOutputStream fout = localFs.create(testPath);
+ fout.write("testing".getBytes());
+ fout.close();
+
+ fout = localFs.create(testPath11);
+ fout.write("testing you".getBytes());
+ fout.close();
+
+ localFs.delete(localFs.getChecksumFile(testPath), true);
+ assertTrue("checksum deleted", !localFs.exists(localFs.getChecksumFile(testPath)));
+
+ //copying the wrong checksum file
+ FileUtil.copy(localFs, localFs.getChecksumFile(testPath11), localFs,
+ localFs.getChecksumFile(testPath),false,true,conf);
+ assertTrue("checksum exists", localFs.exists(localFs.getChecksumFile(testPath)));
+
+ boolean errorRead = false;
+ try {
+ TestLocalFileSystem.readFile(localFs, testPath);
+ }catch(ChecksumException ie) {
+ errorRead = true;
+ }
+ assertTrue("error reading", errorRead);
+
+ //now setting verify false, the read should succeed
+ localFs.setVerifyChecksum(false);
+ String str = TestLocalFileSystem.readFile(localFs, testPath);
+ assertTrue("read", "testing".equals(str));
+
+ }
+}
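The expected values in testgetChecksumLength are consistent with the CRC file layout used by ChecksumFileSystem: an 8-byte header followed by one 4-byte checksum per bytesPerChecksum bytes of data, i.e.

    checksumFileLength = 8 + 4 * ceil(fileSize / bytesPerChecksum)

For example, getChecksumLength(513L, 512) = 8 + 4 * 2 = 16, and getChecksumLength(100L, 1) = 8 + 4 * 100 = 408. (The 8-byte header size here is inferred from the expected values rather than stated in the patch.)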
diff --git a/src/test/core/org/apache/hadoop/fs/TestDFVariations.java b/src/test/core/org/apache/hadoop/fs/TestDFVariations.java
new file mode 100644
index 0000000000..3999050069
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/TestDFVariations.java
@@ -0,0 +1,63 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.fs;
+
+import junit.framework.TestCase;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.EnumSet;
+
+public class TestDFVariations extends TestCase {
+
+ public static class XXDF extends DF {
+ private final String osName;
+ public XXDF(String osName) throws IOException {
+ super(new File(System.getProperty("test.build.data","/tmp")), 0L);
+ this.osName = osName;
+ }
+ @Override
+ public DF.OSType getOSType() {
+ return DF.getOSType(osName);
+ }
+ @Override
+ protected String[] getExecString() {
+ switch(getOSType()) {
+ case OS_TYPE_AIX:
+ return new String[] { "echo", "IGNORE\n", "/dev/sda3",
+ "453115160", "400077240", "11%", "18", "skip%", "/foo/bar", "\n" };
+ default:
+ return new String[] { "echo", "IGNORE\n", "/dev/sda3",
+ "453115160", "53037920", "400077240", "11%", "/foo/bar", "\n" };
+ }
+ }
+ }
+
+ public void testOSParsing() throws Exception {
+ for (DF.OSType ost : EnumSet.allOf(DF.OSType.class)) {
+ XXDF df = new XXDF(ost.getId());
+ assertEquals(ost.getId() + " total", 453115160 * 1024L, df.getCapacity());
+ assertEquals(ost.getId() + " used", 53037920 * 1024L, df.getUsed());
+ assertEquals(ost.getId() + " avail", 400077240 * 1024L, df.getAvailable());
+ assertEquals(ost.getId() + " pcnt used", 11, df.getPercentUsed());
+ assertEquals(ost.getId() + " mount", "/foo/bar", df.getMount());
+ }
+ }
+
+}
+
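The XXDF subclass replaces the real df invocation with an echo, so the string arrays above stand in for a header line plus one line of df -k style output. On the default branch the numeric fields are the 1K-block counts for capacity, used and available space, which is why testOSParsing multiplies them by 1024:

    453115160 KB * 1024 = 463989923840 bytes   (df.getCapacity())
     53037920 KB * 1024 =  54310830080 bytes   (df.getUsed())
    400077240 KB * 1024 = 409679093760 bytes   (df.getAvailable())

On the AIX branch the used column is absent and follows from capacity minus available (453115160 - 400077240 = 53037920 KB), matching the same expected values. The column interpretation here is inferred from the expected values; the exact df layout differs per OS, which is what DF.OSType captures.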
diff --git a/src/test/core/org/apache/hadoop/fs/TestDU.java b/src/test/core/org/apache/hadoop/fs/TestDU.java
new file mode 100644
index 0000000000..6df487be55
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/TestDU.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import junit.framework.TestCase;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.Random;
+
+/** This test makes sure that the "du" command is not re-executed on every call to getUsed */
+public class TestDU extends TestCase {
+ final static private File DU_DIR = new File(
+ System.getProperty("test.build.data","/tmp"), "dutmp");
+
+ public void setUp() throws IOException {
+ FileUtil.fullyDelete(DU_DIR);
+ assertTrue(DU_DIR.mkdirs());
+ }
+
+ public void tearDown() throws IOException {
+ FileUtil.fullyDelete(DU_DIR);
+ }
+
+ private void createFile(File newFile, int size) throws IOException {
+ // write random data so that filesystems with compression enabled (e.g., ZFS)
+ // can't compress the file
+ Random random = new Random();
+ byte[] data = new byte[size];
+ random.nextBytes(data);
+
+ newFile.createNewFile();
+ RandomAccessFile file = new RandomAccessFile(newFile, "rws");
+
+ file.write(data);
+
+ file.getFD().sync();
+ file.close();
+ }
+
+ /**
+ * Verify that du returns expected used space for a file.
+ * We assume here that if a file system creates a file whose size
+ * is a multiple of that file system's block size,
+ * then the space used for the file will be exactly that size.
+ * This is true for most file systems.
+ *
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public void testDU() throws IOException, InterruptedException {
+ int writtenSize = 32*1024; // writing 32K
+ File file = new File(DU_DIR, "data");
+ createFile(file, writtenSize);
+
+ Thread.sleep(5000); // let the metadata updater catch up
+
+ DU du = new DU(file, 10000);
+ du.start();
+ long duSize = du.getUsed();
+ du.shutdown();
+
+ assertEquals(writtenSize, duSize);
+
+ //test with 0 interval, will not launch thread
+ du = new DU(file, 0);
+ du.start();
+ duSize = du.getUsed();
+ du.shutdown();
+
+ assertEquals(writtenSize, duSize);
+
+ //test without launching thread
+ du = new DU(file, 10000);
+ duSize = du.getUsed();
+
+ assertEquals(writtenSize, duSize);
+ }
+}
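For context, DU wraps the platform du command: it runs du once, caches the reported usage (converted to bytes), and, when constructed with a positive interval and started, refreshes that cached value from a background thread. getUsed() therefore returns the cached figure instead of re-running du, which is the behavior this test pins down. The calling pattern, as exercised above (the directory path is illustrative):

    DU du = new DU(new File("/some/dir"), 10000); // refresh every 10 seconds
    du.start();                      // launches the background refresh thread
    long usedBytes = du.getUsed();   // cached usage, in bytes
    du.shutdown();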
diff --git a/src/test/core/org/apache/hadoop/fs/TestGetFileBlockLocations.java b/src/test/core/org/apache/hadoop/fs/TestGetFileBlockLocations.java
new file mode 100644
index 0000000000..c85cc98862
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/TestGetFileBlockLocations.java
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Testing the correctness of FileSystem.getFileBlockLocations.
+ */
+public class TestGetFileBlockLocations extends TestCase {
+ private static String TEST_ROOT_DIR =
+ System.getProperty("test.build.data", "/tmp/testGetFileBlockLocations");
+ private static final int FileLength = 4 * 1024 * 1024; // 4MB
+ private Configuration conf;
+ private Path path;
+ private FileSystem fs;
+ private Random random;
+
+ /**
+ * @see TestCase#setUp()
+ */
+ @Override
+ protected void setUp() throws IOException {
+ conf = new Configuration();
+ Path rootPath = new Path(TEST_ROOT_DIR);
+ path = new Path(rootPath, "TestGetFileBlockLocations");
+ fs = rootPath.getFileSystem(conf);
+ FSDataOutputStream fsdos = fs.create(path, true);
+ byte[] buffer = new byte[1024];
+ while (fsdos.getPos() < FileLength) {
+ fsdos.write(buffer);
+ }
+ fsdos.close();
+ random = new Random(System.nanoTime());
+ }
+
+ private void oneTest(int offBegin, int offEnd, FileStatus status)
+ throws IOException {
+ if (offBegin > offEnd) {
+ int tmp = offBegin;
+ offBegin = offEnd;
+ offEnd = tmp;
+ }
+ BlockLocation[] locations =
+ fs.getFileBlockLocations(status, offBegin, offEnd - offBegin);
+ if (offBegin < status.getLen()) {
+ Arrays.sort(locations, new Comparator<BlockLocation>() {
+
+ @Override
+ public int compare(BlockLocation arg0, BlockLocation arg1) {
+ long cmprv = arg0.getOffset() - arg1.getOffset();
+ if (cmprv < 0) return -1;
+ if (cmprv > 0) return 1;
+ cmprv = arg0.getLength() - arg1.getLength();
+ if (cmprv < 0) return -1;
+ if (cmprv > 0) return 1;
+ return 0;
+ }
+
+ });
+ offBegin = (int) Math.min(offBegin, status.getLen() - 1);
+ offEnd = (int) Math.min(offEnd, status.getLen());
+ BlockLocation first = locations[0];
+ BlockLocation last = locations[locations.length - 1];
+ assertTrue(first.getOffset() <= offBegin);
+ assertTrue(offEnd <= last.getOffset() + last.getLength());
+ } else {
+ assertTrue(locations.length == 0);
+ }
+ }
+ /**
+ * @see TestCase#tearDown()
+ */
+ @Override
+ protected void tearDown() throws IOException {
+ fs.delete(path, true);
+ fs.close();
+ }
+
+ public void testFailureNegativeParameters() throws IOException {
+ FileStatus status = fs.getFileStatus(path);
+ try {
+ BlockLocation[] locations = fs.getFileBlockLocations(status, -1, 100);
+ fail("Expecting exception being throw");
+ } catch (IllegalArgumentException e) {
+
+ }
+
+ try {
+ BlockLocation[] locations = fs.getFileBlockLocations(status, 100, -1);
+ fail("Expecting exception being throw");
+ } catch (IllegalArgumentException e) {
+
+ }
+ }
+
+ public void testGetFileBlockLocations1() throws IOException {
+ FileStatus status = fs.getFileStatus(path);
+ oneTest(0, (int) status.getLen(), status);
+ oneTest(0, (int) status.getLen() * 2, status);
+ oneTest((int) status.getLen() * 2, (int) status.getLen() * 4, status);
+ oneTest((int) status.getLen() / 2, (int) status.getLen() * 3, status);
+ for (int i = 0; i < 10; ++i) {
+ oneTest((int) status.getLen() * i / 10, (int) status.getLen() * (i + 1)
+ / 10, status);
+ }
+ }
+
+ public void testGetFileBlockLocations2() throws IOException {
+ FileStatus status = fs.getFileStatus(path);
+ for (int i = 0; i < 1000; ++i) {
+ int offBegin = random.nextInt((int) (2 * status.getLen()));
+ int offEnd = random.nextInt((int) (2 * status.getLen()));
+ oneTest(offBegin, offEnd, status);
+ }
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/fs/TestGlobExpander.java b/src/test/core/org/apache/hadoop/fs/TestGlobExpander.java
new file mode 100644
index 0000000000..b0466b8022
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/TestGlobExpander.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import java.util.List;
+
+import junit.framework.TestCase;
+
+public class TestGlobExpander extends TestCase {
+
+ public void testExpansionIsIdentical() throws IOException {
+ checkExpansionIsIdentical("");
+ checkExpansionIsIdentical("/}");
+ checkExpansionIsIdentical("/}{a,b}");
+ checkExpansionIsIdentical("{/");
+ checkExpansionIsIdentical("{a}");
+ checkExpansionIsIdentical("{a,b}/{b,c}");
+ checkExpansionIsIdentical("p\\{a/b,c/d\\}s");
+ checkExpansionIsIdentical("p{a\\/b,c\\/d}s");
+ }
+
+ public void testExpansion() throws IOException {
+ checkExpansion("{a/b}", "a/b");
+ checkExpansion("/}{a/b}", "/}a/b");
+ checkExpansion("p{a/b,c/d}s", "pa/bs", "pc/ds");
+ checkExpansion("{a/b,c/d,{e,f}}", "a/b", "c/d", "{e,f}");
+ checkExpansion("{a/b,c/d}{e,f}", "a/b{e,f}", "c/d{e,f}");
+ checkExpansion("{a,b}/{b,{c/d,e/f}}", "{a,b}/b", "{a,b}/c/d", "{a,b}/e/f");
+ checkExpansion("{a,b}/{c/\\d}", "{a,b}/c/d");
+ }
+
+ private void checkExpansionIsIdentical(String filePattern) throws IOException {
+ checkExpansion(filePattern, filePattern);
+ }
+
+ private void checkExpansion(String filePattern, String... expectedExpansions)
+ throws IOException {
+ List<String> actualExpansions = GlobExpander.expand(filePattern);
+ assertEquals("Different number of expansions", expectedExpansions.length,
+ actualExpansions.size());
+ for (int i = 0; i < expectedExpansions.length; i++) {
+ assertEquals("Expansion of " + filePattern, expectedExpansions[i],
+ actualExpansions.get(i));
+ }
+ }
+}
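GlobExpander.expand rewrites only those curly-brace groups that contain a path separator, splitting the pattern into one pattern per alternative so that globbing can proceed directory by directory; slash-free groups are passed through for the ordinary glob matcher. A small sketch mirroring the cases above (called from the same package, as this test does):

    import java.util.List;

    List<String> patterns = GlobExpander.expand("p{a/b,c/d}s");
    // patterns == ["pa/bs", "pc/ds"]; a slash-free group such as "{e,f}"
    // would be returned unchanged.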
diff --git a/src/test/core/org/apache/hadoop/fs/TestLocalDirAllocator.java b/src/test/core/org/apache/hadoop/fs/TestLocalDirAllocator.java
new file mode 100644
index 0000000000..eef90308aa
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/TestLocalDirAllocator.java
@@ -0,0 +1,211 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.Shell;
+
+import junit.framework.TestCase;
+
+/** This test verifies that LocalDirAllocator works correctly;
+ * Every test case uses different buffer dirs to
+ * enforce the AllocatorPerContext initialization.
+ * This test does not run on Cygwin because under Cygwin
+ * a directory can be created in a read-only directory
+ * which breaks this test.
+ */
+public class TestLocalDirAllocator extends TestCase {
+ final static private Configuration conf = new Configuration();
+ final static private String BUFFER_DIR_ROOT = "build/test/temp";
+ final static private Path BUFFER_PATH_ROOT = new Path(BUFFER_DIR_ROOT);
+ final static private File BUFFER_ROOT = new File(BUFFER_DIR_ROOT);
+ final static private String BUFFER_DIR[] = new String[] {
+ BUFFER_DIR_ROOT+"/tmp0", BUFFER_DIR_ROOT+"/tmp1", BUFFER_DIR_ROOT+"/tmp2",
+ BUFFER_DIR_ROOT+"/tmp3", BUFFER_DIR_ROOT+"/tmp4", BUFFER_DIR_ROOT+"/tmp5",
+ BUFFER_DIR_ROOT+"/tmp6"};
+ final static private Path BUFFER_PATH[] = new Path[] {
+ new Path(BUFFER_DIR[0]), new Path(BUFFER_DIR[1]), new Path(BUFFER_DIR[2]),
+ new Path(BUFFER_DIR[3]), new Path(BUFFER_DIR[4]), new Path(BUFFER_DIR[5]),
+ new Path(BUFFER_DIR[6])};
+ final static private String CONTEXT = "dfs.client.buffer.dir";
+ final static private String FILENAME = "block";
+ final static private LocalDirAllocator dirAllocator =
+ new LocalDirAllocator(CONTEXT);
+ static LocalFileSystem localFs;
+ final static private boolean isWindows =
+ System.getProperty("os.name").startsWith("Windows");
+ final static int SMALL_FILE_SIZE = 100;
+ static {
+ try {
+ localFs = FileSystem.getLocal(conf);
+ rmBufferDirs();
+ } catch(IOException e) {
+ System.out.println(e.getMessage());
+ e.printStackTrace();
+ System.exit(-1);
+ }
+ }
+
+ private static void rmBufferDirs() throws IOException {
+ assertTrue(!localFs.exists(BUFFER_PATH_ROOT) ||
+ localFs.delete(BUFFER_PATH_ROOT, true));
+ }
+
+ private void validateTempDirCreation(int i) throws IOException {
+ File result = createTempFile(SMALL_FILE_SIZE);
+ assertTrue("Checking for " + BUFFER_DIR[i] + " in " + result + " - FAILED!",
+ result.getPath().startsWith(new File(BUFFER_DIR[i], FILENAME).getPath()));
+ }
+
+ private File createTempFile() throws IOException {
+ File result = dirAllocator.createTmpFileForWrite(FILENAME, -1, conf);
+ result.delete();
+ return result;
+ }
+
+ private File createTempFile(long size) throws IOException {
+ File result = dirAllocator.createTmpFileForWrite(FILENAME, size, conf);
+ result.delete();
+ return result;
+ }
+
+ /** Two buffer dirs. The first dir does not exist & is on a read-only disk;
+ * The second dir exists & is RW
+ * @throws Exception
+ */
+ public void test0() throws Exception {
+ if (isWindows) return;
+ try {
+ conf.set(CONTEXT, BUFFER_DIR[0]+","+BUFFER_DIR[1]);
+ assertTrue(localFs.mkdirs(BUFFER_PATH[1]));
+ BUFFER_ROOT.setReadOnly();
+ validateTempDirCreation(1);
+ validateTempDirCreation(1);
+ } finally {
+ Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT});
+ rmBufferDirs();
+ }
+ }
+
+ /** Two buffer dirs. The first dir exists & is on a read-only disk;
+ * The second dir exists & is RW
+ * @throws Exception
+ */
+ public void test1() throws Exception {
+ if (isWindows) return;
+ try {
+ conf.set(CONTEXT, BUFFER_DIR[1]+","+BUFFER_DIR[2]);
+ assertTrue(localFs.mkdirs(BUFFER_PATH[2]));
+ BUFFER_ROOT.setReadOnly();
+ validateTempDirCreation(2);
+ validateTempDirCreation(2);
+ } finally {
+ Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT});
+ rmBufferDirs();
+ }
+ }
+ /** Two buffer dirs. Neither exists, but both are on a RW disk.
+ * Check that tmp dirs are allocated in a round-robin manner.
+ */
+ public void test2() throws Exception {
+ if (isWindows) return;
+ try {
+ conf.set(CONTEXT, BUFFER_DIR[2]+","+BUFFER_DIR[3]);
+
+ // create the first file, and then figure the round-robin sequence
+ createTempFile(SMALL_FILE_SIZE);
+ int firstDirIdx = (dirAllocator.getCurrentDirectoryIndex() == 0) ? 2 : 3;
+ int secondDirIdx = (firstDirIdx == 2) ? 3 : 2;
+
+ // check if tmp dirs are allocated in a round-robin manner
+ validateTempDirCreation(firstDirIdx);
+ validateTempDirCreation(secondDirIdx);
+ validateTempDirCreation(firstDirIdx);
+ } finally {
+ rmBufferDirs();
+ }
+ }
+
+ /** Two buffer dirs. Both exist and are on a R/W disk.
+ * Later disk1 becomes read-only.
+ * @throws Exception
+ */
+ public void test3() throws Exception {
+ if (isWindows) return;
+ try {
+ conf.set(CONTEXT, BUFFER_DIR[3]+","+BUFFER_DIR[4]);
+ assertTrue(localFs.mkdirs(BUFFER_PATH[3]));
+ assertTrue(localFs.mkdirs(BUFFER_PATH[4]));
+
+ // create the first file with size, and then figure the round-robin sequence
+ createTempFile(SMALL_FILE_SIZE);
+
+ int nextDirIdx = (dirAllocator.getCurrentDirectoryIndex() == 0) ? 3 : 4;
+ validateTempDirCreation(nextDirIdx);
+
+ // change buffer directory 2 to be read only
+ new File(BUFFER_DIR[4]).setReadOnly();
+ validateTempDirCreation(3);
+ validateTempDirCreation(3);
+ } finally {
+ rmBufferDirs();
+ }
+ }
+
+ /**
+ * Two buffer dirs, on read-write disk.
+ *
+ * Try to create a whole bunch of files.
+ * Verify that they do indeed all get created where they should.
+ *
+ * Would ideally check statistical properties of distribution, but
+ * we don't have the nerve to risk false-positives here.
+ *
+ * @throws Exception
+ */
+ static final int TRIALS = 100;
+ public void test4() throws Exception {
+ if (isWindows) return;
+ try {
+
+ conf.set(CONTEXT, BUFFER_DIR[5]+","+BUFFER_DIR[6]);
+ assertTrue(localFs.mkdirs(BUFFER_PATH[5]));
+ assertTrue(localFs.mkdirs(BUFFER_PATH[6]));
+
+ int inDir5=0, inDir6=0;
+ for(int i = 0; i < TRIALS; ++i) {
+ File result = createTempFile();
+ if(result.getPath().startsWith(new File(BUFFER_DIR[5], FILENAME).getPath())) {
+ inDir5++;
+ } else if(result.getPath().startsWith(new File(BUFFER_DIR[6], FILENAME).getPath())) {
+ inDir6++;
+ }
+ result.delete();
+ }
+
+ assertTrue( inDir5 + inDir6 == TRIALS);
+
+ } finally {
+ rmBufferDirs();
+ }
+ }
+
+}
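LocalDirAllocator is driven by a configuration key (the "context") whose value is a comma-separated list of local directories; it creates whichever of them it can write to and hands out paths from them in round-robin order, skipping full or read-only ones, which is exactly what the cases above exercise. A minimal usage sketch (the directory values are illustrative):

    import java.io.File;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.LocalDirAllocator;

    Configuration conf = new Configuration();
    conf.set("dfs.client.buffer.dir", "/data1/tmp,/data2/tmp");
    LocalDirAllocator allocator = new LocalDirAllocator("dfs.client.buffer.dir");
    // Picks a writable directory from the list (round-robin, space permitting),
    // creating it on demand, and returns a temp file path beneath it.
    File scratch = allocator.createTmpFileForWrite("block", 1024, conf);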
diff --git a/src/test/core/org/apache/hadoop/fs/TestLocalFileSystem.java b/src/test/core/org/apache/hadoop/fs/TestLocalFileSystem.java
new file mode 100644
index 0000000000..b244b9b5df
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/TestLocalFileSystem.java
@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.conf.Configuration;
+import java.io.*;
+import junit.framework.*;
+
+/**
+ * This class tests the local file system via the FileSystem abstraction.
+ */
+public class TestLocalFileSystem extends TestCase {
+ private static String TEST_ROOT_DIR
+ = System.getProperty("test.build.data","build/test/data/work-dir/localfs");
+
+
+ static void writeFile(FileSystem fs, Path name) throws IOException {
+ FSDataOutputStream stm = fs.create(name);
+ stm.writeBytes("42\n");
+ stm.close();
+ }
+
+ static String readFile(FileSystem fs, Path name) throws IOException {
+ byte[] b = new byte[1024];
+ int offset = 0;
+ FSDataInputStream in = fs.open(name);
+ for(int remaining, n;
+ (remaining = b.length - offset) > 0 && (n = in.read(b, offset, remaining)) != -1;
+ offset += n);
+ in.close();
+
+ String s = new String(b, 0, offset);
+ System.out.println("s=" + s);
+ return s;
+ }
+
+ private void cleanupFile(FileSystem fs, Path name) throws IOException {
+ assertTrue(fs.exists(name));
+ fs.delete(name, true);
+ assertTrue(!fs.exists(name));
+ }
+
+ /**
+ * Test the capability of setting the working directory.
+ */
+ public void testWorkingDirectory() throws IOException {
+ Configuration conf = new Configuration();
+ FileSystem fileSys = FileSystem.getLocal(conf);
+ Path origDir = fileSys.getWorkingDirectory();
+ Path subdir = new Path(TEST_ROOT_DIR, "new");
+ try {
+ // make sure it doesn't already exist
+ assertTrue(!fileSys.exists(subdir));
+ // make it and check for it
+ assertTrue(fileSys.mkdirs(subdir));
+ assertTrue(fileSys.isDirectory(subdir));
+
+ fileSys.setWorkingDirectory(subdir);
+
+ // create a directory and check for it
+ Path dir1 = new Path("dir1");
+ assertTrue(fileSys.mkdirs(dir1));
+ assertTrue(fileSys.isDirectory(dir1));
+
+ // delete the directory and make sure it went away
+ fileSys.delete(dir1, true);
+ assertTrue(!fileSys.exists(dir1));
+
+ // create files and manipulate them.
+ Path file1 = new Path("file1");
+ Path file2 = new Path("sub/file2");
+ writeFile(fileSys, file1);
+ fileSys.copyFromLocalFile(file1, file2);
+ assertTrue(fileSys.exists(file1));
+ assertTrue(fileSys.isFile(file1));
+ cleanupFile(fileSys, file2);
+ fileSys.copyToLocalFile(file1, file2);
+ cleanupFile(fileSys, file2);
+
+ // try a rename
+ fileSys.rename(file1, file2);
+ assertTrue(!fileSys.exists(file1));
+ assertTrue(fileSys.exists(file2));
+ fileSys.rename(file2, file1);
+
+ // try reading a file
+ InputStream stm = fileSys.open(file1);
+ byte[] buffer = new byte[3];
+ int bytesRead = stm.read(buffer, 0, 3);
+ assertEquals("42\n", new String(buffer, 0, bytesRead));
+ stm.close();
+ } finally {
+ fileSys.setWorkingDirectory(origDir);
+ fileSys.delete(subdir, true);
+ }
+ }
+
+ public void testCopy() throws IOException {
+ Configuration conf = new Configuration();
+ LocalFileSystem fs = FileSystem.getLocal(conf);
+ Path src = new Path(TEST_ROOT_DIR, "dingo");
+ Path dst = new Path(TEST_ROOT_DIR, "yak");
+ writeFile(fs, src);
+ assertTrue(FileUtil.copy(fs, src, fs, dst, true, false, conf));
+ assertTrue(!fs.exists(src) && fs.exists(dst));
+ assertTrue(FileUtil.copy(fs, dst, fs, src, false, false, conf));
+ assertTrue(fs.exists(src) && fs.exists(dst));
+ assertTrue(FileUtil.copy(fs, src, fs, dst, true, true, conf));
+ assertTrue(!fs.exists(src) && fs.exists(dst));
+ fs.mkdirs(src);
+ assertTrue(FileUtil.copy(fs, dst, fs, src, false, false, conf));
+ Path tmp = new Path(src, dst.getName());
+ assertTrue(fs.exists(tmp) && fs.exists(dst));
+ assertTrue(FileUtil.copy(fs, dst, fs, src, false, true, conf));
+ assertTrue(fs.delete(tmp, true));
+ fs.mkdirs(tmp);
+ try {
+ FileUtil.copy(fs, dst, fs, src, true, true, conf);
+ fail("Failed to detect existing dir");
+ } catch (IOException e) { }
+ }
+
+ public void testHomeDirectory() throws IOException {
+ Configuration conf = new Configuration();
+ FileSystem fileSys = FileSystem.getLocal(conf);
+ Path home = new Path(System.getProperty("user.home"))
+ .makeQualified(fileSys);
+ Path fsHome = fileSys.getHomeDirectory();
+ assertEquals(home, fsHome);
+ }
+
+ public void testPathEscapes() throws IOException {
+ Configuration conf = new Configuration();
+ FileSystem fs = FileSystem.getLocal(conf);
+ Path path = new Path(TEST_ROOT_DIR, "foo%bar");
+ writeFile(fs, path);
+ FileStatus status = fs.getFileStatus(path);
+ assertEquals(path.makeQualified(fs), status.getPath());
+ cleanupFile(fs, path);
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/fs/TestLocalFileSystemPermission.java b/src/test/core/org/apache/hadoop/fs/TestLocalFileSystemPermission.java
new file mode 100644
index 0000000000..f68cdb66cd
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/TestLocalFileSystemPermission.java
@@ -0,0 +1,157 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.*;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Shell;
+
+import java.io.*;
+import java.util.*;
+
+import junit.framework.*;
+
+/**
+ * This class tests the local file system via the FileSystem abstraction.
+ */
+public class TestLocalFileSystemPermission extends TestCase {
+ static final String TEST_PATH_PREFIX = new Path(System.getProperty(
+ "test.build.data", "/tmp")).toString().replace(' ', '_')
+ + "/" + TestLocalFileSystemPermission.class.getSimpleName() + "_";
+
+ {
+ try {
+ ((org.apache.commons.logging.impl.Log4JLogger)FileSystem.LOG).getLogger()
+ .setLevel(org.apache.log4j.Level.DEBUG);
+ }
+ catch(Exception e) {
+ System.out.println("Cannot change log level\n"
+ + StringUtils.stringifyException(e));
+ }
+ }
+
+ private Path writeFile(FileSystem fs, String name) throws IOException {
+ Path f = new Path(TEST_PATH_PREFIX + name);
+ FSDataOutputStream stm = fs.create(f);
+ stm.writeBytes("42\n");
+ stm.close();
+ return f;
+ }
+
+ private void cleanupFile(FileSystem fs, Path name) throws IOException {
+ assertTrue(fs.exists(name));
+ fs.delete(name, true);
+ assertTrue(!fs.exists(name));
+ }
+
+ /** Test LocalFileSystem.setPermission */
+ public void testLocalFSsetPermission() throws IOException {
+ if (Path.WINDOWS) {
+ System.out.println("Cannot run test for Windows");
+ return;
+ }
+ Configuration conf = new Configuration();
+ LocalFileSystem localfs = FileSystem.getLocal(conf);
+ String filename = "foo";
+ Path f = writeFile(localfs, filename);
+ try {
+ System.out.println(filename + ": " + getPermission(localfs, f));
+ }
+ catch(Exception e) {
+ System.out.println(StringUtils.stringifyException(e));
+ System.out.println("Cannot run test");
+ return;
+ }
+
+ try {
+ // create files and manipulate them.
+ FsPermission all = new FsPermission((short)0777);
+ FsPermission none = new FsPermission((short)0);
+
+ localfs.setPermission(f, none);
+ assertEquals(none, getPermission(localfs, f));
+
+ localfs.setPermission(f, all);
+ assertEquals(all, getPermission(localfs, f));
+ }
+ finally {cleanupFile(localfs, f);}
+ }
+
+ FsPermission getPermission(LocalFileSystem fs, Path p) throws IOException {
+ return fs.getFileStatus(p).getPermission();
+ }
+
+ /** Test LocalFileSystem.setOwner */
+ public void testLocalFSsetOwner() throws IOException {
+ if (Path.WINDOWS) {
+ System.out.println("Cannot run test for Windows");
+ return;
+ }
+
+ Configuration conf = new Configuration();
+ LocalFileSystem localfs = FileSystem.getLocal(conf);
+ String filename = "bar";
+ Path f = writeFile(localfs, filename);
+ List<String> groups = null;
+ try {
+ groups = getGroups();
+ System.out.println(filename + ": " + getPermission(localfs, f));
+ }
+ catch(IOException e) {
+ System.out.println(StringUtils.stringifyException(e));
+ System.out.println("Cannot run test");
+ return;
+ }
+ if (groups == null || groups.size() < 1) {
+ System.out.println("Cannot run test: need at least one group. groups="
+ + groups);
+ return;
+ }
+
+ // create files and manipulate them.
+ try {
+ String g0 = groups.get(0);
+ localfs.setOwner(f, null, g0);
+ assertEquals(g0, getGroup(localfs, f));
+
+ if (groups.size() > 1) {
+ String g1 = groups.get(1);
+ localfs.setOwner(f, null, g1);
+ assertEquals(g1, getGroup(localfs, f));
+ } else {
+ System.out.println("Not testing changing the group since user " +
+ "belongs to only one group.");
+ }
+ }
+ finally {cleanupFile(localfs, f);}
+ }
+
+ static List<String> getGroups() throws IOException {
+ List<String> a = new ArrayList<String>();
+ String s = Shell.execCommand(Shell.getGROUPS_COMMAND());
+ for(StringTokenizer t = new StringTokenizer(s); t.hasMoreTokens(); ) {
+ a.add(t.nextToken());
+ }
+ return a;
+ }
+
+ String getGroup(LocalFileSystem fs, Path p) throws IOException {
+ return fs.getFileStatus(p).getGroup();
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/fs/TestPath.java b/src/test/core/org/apache/hadoop/fs/TestPath.java
new file mode 100644
index 0000000000..4fa28bc77c
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/TestPath.java
@@ -0,0 +1,152 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import java.util.*;
+import junit.framework.TestCase;
+
+public class TestPath extends TestCase {
+ public void testToString() {
+ toStringTest("/");
+ toStringTest("/foo");
+ toStringTest("/foo/bar");
+ toStringTest("foo");
+ toStringTest("foo/bar");
+ boolean emptyException = false;
+ try {
+ toStringTest("");
+ } catch (IllegalArgumentException e) {
+ // expect to receive an IllegalArgumentException
+ emptyException = true;
+ }
+ assertTrue(emptyException);
+ if (Path.WINDOWS) {
+ toStringTest("c:");
+ toStringTest("c:/");
+ toStringTest("c:foo");
+ toStringTest("c:foo/bar");
+ toStringTest("c:foo/bar");
+ toStringTest("c:/foo/bar");
+ }
+ }
+
+ private void toStringTest(String pathString) {
+ assertEquals(pathString, new Path(pathString).toString());
+ }
+
+ public void testNormalize() {
+ assertEquals("/", new Path("//").toString());
+ assertEquals("/foo", new Path("/foo/").toString());
+ assertEquals("/foo", new Path("/foo/").toString());
+ assertEquals("foo", new Path("foo/").toString());
+ assertEquals("foo", new Path("foo//").toString());
+ assertEquals("foo/bar", new Path("foo//bar").toString());
+ if (Path.WINDOWS) {
+ assertEquals("c:/a/b", new Path("c:\\a\\b").toString());
+ }
+ }
+
+ public void testIsAbsolute() {
+ assertTrue(new Path("/").isAbsolute());
+ assertTrue(new Path("/foo").isAbsolute());
+ assertFalse(new Path("foo").isAbsolute());
+ assertFalse(new Path("foo/bar").isAbsolute());
+ assertFalse(new Path(".").isAbsolute());
+ if (Path.WINDOWS) {
+ assertTrue(new Path("c:/a/b").isAbsolute());
+ assertFalse(new Path("c:a/b").isAbsolute());
+ }
+ }
+
+ public void testParent() {
+ assertEquals(new Path("/foo"), new Path("/foo/bar").getParent());
+ assertEquals(new Path("foo"), new Path("foo/bar").getParent());
+ assertEquals(new Path("/"), new Path("/foo").getParent());
+ if (Path.WINDOWS) {
+ assertEquals(new Path("c:/"), new Path("c:/foo").getParent());
+ }
+ }
+
+ public void testChild() {
+ assertEquals(new Path("."), new Path(".", "."));
+ assertEquals(new Path("/"), new Path("/", "."));
+ assertEquals(new Path("/"), new Path(".", "/"));
+ assertEquals(new Path("/foo"), new Path("/", "foo"));
+ assertEquals(new Path("/foo/bar"), new Path("/foo", "bar"));
+ assertEquals(new Path("/foo/bar/baz"), new Path("/foo/bar", "baz"));
+ assertEquals(new Path("/foo/bar/baz"), new Path("/foo", "bar/baz"));
+ assertEquals(new Path("foo"), new Path(".", "foo"));
+ assertEquals(new Path("foo/bar"), new Path("foo", "bar"));
+ assertEquals(new Path("foo/bar/baz"), new Path("foo", "bar/baz"));
+ assertEquals(new Path("foo/bar/baz"), new Path("foo/bar", "baz"));
+ assertEquals(new Path("/foo"), new Path("/bar", "/foo"));
+ if (Path.WINDOWS) {
+ assertEquals(new Path("c:/foo"), new Path("/bar", "c:/foo"));
+ assertEquals(new Path("c:/foo"), new Path("d:/bar", "c:/foo"));
+ }
+ }
+
+ public void testEquals() {
+ assertFalse(new Path("/").equals(new Path("/foo")));
+ }
+
+ public void testDots() {
+ // Test Path(String)
+ assertEquals(new Path("/foo/bar/baz").toString(), "/foo/bar/baz");
+ assertEquals(new Path("/foo/bar", ".").toString(), "/foo/bar");
+ assertEquals(new Path("/foo/bar/../baz").toString(), "/foo/baz");
+ assertEquals(new Path("/foo/bar/./baz").toString(), "/foo/bar/baz");
+ assertEquals(new Path("/foo/bar/baz/../../fud").toString(), "/foo/fud");
+ assertEquals(new Path("/foo/bar/baz/.././../fud").toString(), "/foo/fud");
+ assertEquals(new Path("../../foo/bar").toString(), "../../foo/bar");
+ assertEquals(new Path(".././../foo/bar").toString(), "../../foo/bar");
+ assertEquals(new Path("./foo/bar/baz").toString(), "foo/bar/baz");
+ assertEquals(new Path("/foo/bar/../../baz/boo").toString(), "/baz/boo");
+ assertEquals(new Path("foo/bar/").toString(), "foo/bar");
+ assertEquals(new Path("foo/bar/../baz").toString(), "foo/baz");
+ assertEquals(new Path("foo/bar/../../baz/boo").toString(), "baz/boo");
+
+
+ // Test Path(Path,Path)
+ assertEquals(new Path("/foo/bar", "baz/boo").toString(), "/foo/bar/baz/boo");
+ assertEquals(new Path("foo/bar/","baz/bud").toString(), "foo/bar/baz/bud");
+
+ assertEquals(new Path("/foo/bar","../../boo/bud").toString(), "/boo/bud");
+ assertEquals(new Path("foo/bar","../../boo/bud").toString(), "boo/bud");
+ assertEquals(new Path(".","boo/bud").toString(), "boo/bud");
+
+ assertEquals(new Path("/foo/bar/baz","../../boo/bud").toString(), "/foo/boo/bud");
+ assertEquals(new Path("foo/bar/baz","../../boo/bud").toString(), "foo/boo/bud");
+
+
+ assertEquals(new Path("../../","../../boo/bud").toString(), "../../../../boo/bud");
+ assertEquals(new Path("../../foo","../../../boo/bud").toString(), "../../../../boo/bud");
+ assertEquals(new Path("../../foo/bar","../boo/bud").toString(), "../../foo/boo/bud");
+
+ assertEquals(new Path("foo/bar/baz","../../..").toString(), "");
+ assertEquals(new Path("foo/bar/baz","../../../../..").toString(), "../..");
+ }
+
+ public void testScheme() throws java.io.IOException {
+ assertEquals("foo:/bar", new Path("foo:/","/bar").toString());
+ assertEquals("foo://bar/baz", new Path("foo://bar/","/baz").toString());
+ }
+
+
+}
diff --git a/src/test/core/org/apache/hadoop/fs/TestTrash.java b/src/test/core/org/apache/hadoop/fs/TestTrash.java
new file mode 100644
index 0000000000..cff1f2419b
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/TestTrash.java
@@ -0,0 +1,313 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+
+import junit.framework.TestCase;
+import java.io.File;
+import java.io.IOException;
+import java.io.DataOutputStream;
+import java.net.URI;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.fs.LocalFileSystem;
+
+/**
+ * This class tests commands from Trash.
+ */
+public class TestTrash extends TestCase {
+
+ private final static Path TEST_DIR =
+ new Path(new File(System.getProperty("test.build.data","/tmp")
+ ).toURI().toString().replace(' ', '+'), "testTrash");
+
+ protected static Path writeFile(FileSystem fs, Path f) throws IOException {
+ DataOutputStream out = fs.create(f);
+ out.writeBytes("dhruba: " + f);
+ out.close();
+ assertTrue(fs.exists(f));
+ return f;
+ }
+
+ protected static Path mkdir(FileSystem fs, Path p) throws IOException {
+ assertTrue(fs.mkdirs(p));
+ assertTrue(fs.exists(p));
+ assertTrue(fs.getFileStatus(p).isDir());
+ return p;
+ }
+
+ // check that the specified file is in Trash
+ protected static void checkTrash(FileSystem fs, Path trashRoot,
+ Path path) throws IOException {
+ Path p = new Path(trashRoot+"/"+ path.toUri().getPath());
+ assertTrue(fs.exists(p));
+ }
+
+ // check that the specified file is not in Trash
+ static void checkNotInTrash(FileSystem fs, Path trashRoot, String pathname)
+ throws IOException {
+ Path p = new Path(trashRoot+"/"+ new Path(pathname).getName());
+ assertTrue(!fs.exists(p));
+ }
+
+ protected static void trashShell(final FileSystem fs, final Path base)
+ throws IOException {
+ Configuration conf = new Configuration();
+ conf.set("fs.trash.interval", "10"); // 10 minute
+ conf.set("fs.default.name", fs.getUri().toString());
+ FsShell shell = new FsShell();
+ shell.setConf(conf);
+ Path trashRoot = null;
+
+ // First create a new directory with mkdirs
+ Path myPath = new Path(base, "test/mkdirs");
+ mkdir(fs, myPath);
+
+ // Second, create a file in that directory.
+ Path myFile = new Path(base, "test/mkdirs/myFile");
+ writeFile(fs, myFile);
+
+ // Verify that expunge without Trash directory
+ // won't throw Exception
+ {
+ String[] args = new String[1];
+ args[0] = "-expunge";
+ int val = -1;
+ try {
+ val = shell.run(args);
+ } catch (Exception e) {
+ System.err.println("Exception raised from Trash.run " +
+ e.getLocalizedMessage());
+ }
+ assertTrue(val == 0);
+ }
+
+ // Verify that we succeed in removing the file we created.
+ // This should go into Trash.
+ {
+ String[] args = new String[2];
+ args[0] = "-rm";
+ args[1] = myFile.toString();
+ int val = -1;
+ try {
+ val = shell.run(args);
+ } catch (Exception e) {
+ System.err.println("Exception raised from Trash.run " +
+ e.getLocalizedMessage());
+ }
+ assertTrue(val == 0);
+
+ trashRoot = shell.getCurrentTrashDir();
+ checkTrash(fs, trashRoot, myFile);
+ }
+
+ // Verify that we can recreate the file
+ writeFile(fs, myFile);
+
+ // Verify that we succeed in removing the file we re-created
+ {
+ String[] args = new String[2];
+ args[0] = "-rm";
+ args[1] = new Path(base, "test/mkdirs/myFile").toString();
+ int val = -1;
+ try {
+ val = shell.run(args);
+ } catch (Exception e) {
+ System.err.println("Exception raised from Trash.run " +
+ e.getLocalizedMessage());
+ }
+ assertTrue(val == 0);
+ }
+
+ // Verify that we can recreate the file
+ writeFile(fs, myFile);
+
+ // Verify that we succeed in removing the whole directory
+ // along with the file inside it.
+ {
+ String[] args = new String[2];
+ args[0] = "-rmr";
+ args[1] = new Path(base, "test/mkdirs").toString();
+ int val = -1;
+ try {
+ val = shell.run(args);
+ } catch (Exception e) {
+ System.err.println("Exception raised from Trash.run " +
+ e.getLocalizedMessage());
+ }
+ assertTrue(val == 0);
+ }
+
+ // recreate directory
+ mkdir(fs, myPath);
+
+ // Verify that we succeed in removing the whole directory
+ {
+ String[] args = new String[2];
+ args[0] = "-rmr";
+ args[1] = new Path(base, "test/mkdirs").toString();
+ int val = -1;
+ try {
+ val = shell.run(args);
+ } catch (Exception e) {
+ System.err.println("Exception raised from Trash.run " +
+ e.getLocalizedMessage());
+ }
+ assertTrue(val == 0);
+ }
+
+ // Check that we can delete a file from the trash
+ {
+ Path toErase = new Path(trashRoot, "toErase");
+ int retVal = -1;
+ writeFile(fs, toErase);
+ try {
+ retVal = shell.run(new String[] {"-rm", toErase.toString()});
+ } catch (Exception e) {
+ System.err.println("Exception raised from Trash.run " +
+ e.getLocalizedMessage());
+ }
+ assertTrue(retVal == 0);
+ checkNotInTrash (fs, trashRoot, toErase.toString());
+ checkNotInTrash (fs, trashRoot, toErase.toString()+".1");
+ }
+
+ // simulate Trash removal
+ {
+ String[] args = new String[1];
+ args[0] = "-expunge";
+ int val = -1;
+ try {
+ val = shell.run(args);
+ } catch (Exception e) {
+ System.err.println("Exception raised from Trash.run " +
+ e.getLocalizedMessage());
+ }
+ assertTrue(val == 0);
+ }
+
+ // verify that after expunging the Trash, it really goes away
+ checkNotInTrash(fs, trashRoot, new Path(base, "test/mkdirs/myFile").toString());
+
+ // recreate directory and file
+ mkdir(fs, myPath);
+ writeFile(fs, myFile);
+
+ // remove file first, then remove directory
+ {
+ String[] args = new String[2];
+ args[0] = "-rm";
+ args[1] = myFile.toString();
+ int val = -1;
+ try {
+ val = shell.run(args);
+ } catch (Exception e) {
+ System.err.println("Exception raised from Trash.run " +
+ e.getLocalizedMessage());
+ }
+ assertTrue(val == 0);
+ checkTrash(fs, trashRoot, myFile);
+
+ args = new String[2];
+ args[0] = "-rmr";
+ args[1] = myPath.toString();
+ val = -1;
+ try {
+ val = shell.run(args);
+ } catch (Exception e) {
+ System.err.println("Exception raised from Trash.run " +
+ e.getLocalizedMessage());
+ }
+ assertTrue(val == 0);
+ checkTrash(fs, trashRoot, myPath);
+ }
+
+ // attempt to remove parent of trash
+ {
+ String[] args = new String[2];
+ args[0] = "-rmr";
+ args[1] = trashRoot.getParent().getParent().toString();
+ int val = -1;
+ try {
+ val = shell.run(args);
+ } catch (Exception e) {
+ System.err.println("Exception raised from Trash.run " +
+ e.getLocalizedMessage());
+ }
+ assertTrue(val == -1);
+ assertTrue(fs.exists(trashRoot));
+ }
+ }
+
+ public static void trashNonDefaultFS(Configuration conf) throws IOException {
+ conf.set("fs.trash.interval", "10"); // 10 minute
+ // attempt non-default FileSystem trash
+ {
+ final FileSystem lfs = FileSystem.getLocal(conf);
+ Path p = TEST_DIR;
+ Path f = new Path(p, "foo/bar");
+ if (lfs.exists(p)) {
+ lfs.delete(p, true);
+ }
+ try {
+ f = writeFile(lfs, f);
+
+ FileSystem.closeAll();
+ FileSystem localFs = FileSystem.get(URI.create("file:///"), conf);
+ Trash lTrash = new Trash(localFs, conf);
+ lTrash.moveToTrash(f.getParent());
+ checkTrash(localFs, lTrash.getCurrentTrashDir(), f);
+ } finally {
+ if (lfs.exists(p)) {
+ lfs.delete(p, true);
+ }
+ }
+ }
+ }
+
+ public void testTrash() throws IOException {
+ Configuration conf = new Configuration();
+ conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class);
+ trashShell(FileSystem.getLocal(conf), TEST_DIR);
+ }
+
+ public void testNonDefaultFS() throws IOException {
+ Configuration conf = new Configuration();
+ conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class);
+ conf.set("fs.default.name", "invalid://host/bar/foo");
+ trashNonDefaultFS(conf);
+ }
+
+ static class TestLFS extends LocalFileSystem {
+ Path home;
+ TestLFS() {
+ this(TEST_DIR);
+ }
+ TestLFS(Path home) {
+ super();
+ this.home = home;
+ }
+ public Path getHomeDirectory() {
+ return home;
+ }
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/fs/TestTruncatedInputBug.java b/src/test/core/org/apache/hadoop/fs/TestTruncatedInputBug.java
new file mode 100644
index 0000000000..e7dabf903c
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/TestTruncatedInputBug.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Test for the input truncation bug that appears when mark/reset is used.
+ * See HADOOP-1489.
+ */
+public class TestTruncatedInputBug extends TestCase {
+ private static String TEST_ROOT_DIR =
+ new Path(System.getProperty("test.build.data","/tmp"))
+ .toString().replace(' ', '+');
+
+ private void writeFile(FileSystem fileSys,
+ Path name, int nBytesToWrite)
+ throws IOException {
+ DataOutputStream out = fileSys.create(name);
+ for (int i = 0; i < nBytesToWrite; ++i) {
+ out.writeByte(0);
+ }
+ out.close();
+ }
+
+ /**
+ * When mark() is used on BufferedInputStream, the request
+ * size on the checksum file system can be small. However,
+ * checksum file system currently depends on the request size
+ * >= bytesPerSum to work properly.
+ */
+ public void testTruncatedInputBug() throws IOException {
+ final int ioBufSize = 512;
+ final int fileSize = ioBufSize*4;
+ int filePos = 0;
+
+ Configuration conf = new Configuration();
+ conf.setInt("io.file.buffer.size", ioBufSize);
+ FileSystem fileSys = FileSystem.getLocal(conf);
+
+ try {
+ // First create a test input file.
+ Path testFile = new Path(TEST_ROOT_DIR, "HADOOP-1489");
+ writeFile(fileSys, testFile, fileSize);
+ assertTrue(fileSys.exists(testFile));
+ assertTrue(fileSys.getFileStatus(testFile).getLen() == fileSize);
+
+ // Now read the file for ioBufSize bytes
+ FSDataInputStream in = fileSys.open(testFile, ioBufSize);
+ // seek beyond data buffered by open
+ filePos += ioBufSize * 2 + (ioBufSize - 10);
+ in.seek(filePos);
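+ // The position is now just short of a buffer boundary; together with the
+ // mark() below, this makes the next request to the checksum file system
+ // smaller than bytesPerSum, which is what used to trigger the bug.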
+
+ // read 4 more bytes before marking
+ for (int i = 0; i < 4; ++i) {
+ if (in.read() == -1) {
+ break;
+ }
+ ++filePos;
+ }
+
+ // Now set mark() to trigger the bug
+ // NOTE: in the fixed code, mark() does nothing (not supported) and
+ // hence won't trigger this bug.
+ in.mark(1);
+ System.out.println("MARKED");
+
+ // Try to read the rest
+ while (filePos < fileSize) {
+ if (in.read() == -1) {
+ break;
+ }
+ ++filePos;
+ }
+ in.close();
+
+ System.out.println("Read " + filePos + " bytes."
+ + " file size=" + fileSize);
+ assertTrue(filePos == fileSize);
+
+ } finally {
+ try {
+ fileSys.close();
+ } catch (Exception e) {
+ // noop
+ }
+ }
+ } // end testTruncatedInputBug
+}
diff --git a/src/test/core/org/apache/hadoop/fs/kfs/KFSEmulationImpl.java b/src/test/core/org/apache/hadoop/fs/kfs/KFSEmulationImpl.java
new file mode 100644
index 0000000000..9c7b5bafef
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/kfs/KFSEmulationImpl.java
@@ -0,0 +1,150 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * @author: Sriram Rao (Kosmix Corp.)
+ *
+ * We need to be able to exercise the code in fs/kfs without really
+ * having a KFS deployment. For this purpose, use the LocalFileSystem
+ * as a way to "emulate" KFS.
+ */
+
+package org.apache.hadoop.fs.kfs;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.Progressable;
+
+public class KFSEmulationImpl implements IFSImpl {
+ FileSystem localFS;
+
+ public KFSEmulationImpl(Configuration conf) throws IOException {
+ localFS = FileSystem.getLocal(conf);
+ }
+
+ public boolean exists(String path) throws IOException {
+ return localFS.exists(new Path(path));
+ }
+ public boolean isDirectory(String path) throws IOException {
+ return localFS.isDirectory(new Path(path));
+ }
+ public boolean isFile(String path) throws IOException {
+ return localFS.isFile(new Path(path));
+ }
+
+ public String[] readdir(String path) throws IOException {
+ FileStatus[] p = localFS.listStatus(new Path(path));
+ String[] entries = null;
+
+ if (p == null) {
+ return null;
+ }
+
+ entries = new String[p.length];
+ for (int i = 0; i < p.length; i++)
+ entries[i] = p[i].getPath().toString();
+ return entries;
+ }
+
+ public FileStatus[] readdirplus(Path path) throws IOException {
+ return localFS.listStatus(path);
+ }
+
+ public int mkdirs(String path) throws IOException {
+ if (localFS.mkdirs(new Path(path)))
+ return 0;
+
+ return -1;
+ }
+
+ public int rename(String source, String dest) throws IOException {
+ if (localFS.rename(new Path(source), new Path(dest)))
+ return 0;
+ return -1;
+ }
+
+ public int rmdir(String path) throws IOException {
+ if (isDirectory(path)) {
+ // the directory better be empty
+ String[] dirEntries = readdir(path);
+ if ((dirEntries.length <= 2) && (localFS.delete(new Path(path), true)))
+ return 0;
+ }
+ return -1;
+ }
+
+ public int remove(String path) throws IOException {
+ if (isFile(path) && (localFS.delete(new Path(path), true)))
+ return 0;
+ return -1;
+ }
+
+ public long filesize(String path) throws IOException {
+ return localFS.getFileStatus(new Path(path)).getLen();
+ }
+ public short getReplication(String path) throws IOException {
+ return 1;
+ }
+ public short setReplication(String path, short replication) throws IOException {
+ return 1;
+ }
+ public String[][] getDataLocation(String path, long start, long len) throws IOException {
+ BlockLocation[] blkLocations =
+ localFS.getFileBlockLocations(localFS.getFileStatus(new Path(path)),
+ start, len);
+ if ((blkLocations == null) || (blkLocations.length == 0)) {
+ return new String[0][];
+ }
+ int blkCount = blkLocations.length;
+ String[][]hints = new String[blkCount][];
+ for (int i=0; i < blkCount ; i++) {
+ String[] hosts = blkLocations[i].getHosts();
+ hints[i] = new String[hosts.length];
+ hints[i] = hosts;
+ }
+ return hints;
+ }
+
+ public long getModificationTime(String path) throws IOException {
+ FileStatus s = localFS.getFileStatus(new Path(path));
+ if (s == null)
+ return 0;
+
+ return s.getModificationTime();
+ }
+
+ public FSDataOutputStream append(String path, int bufferSize, Progressable progress) throws IOException {
+ // besides path/overwrite, the other args don't matter for
+ // testing purposes.
+ return localFS.append(new Path(path));
+ }
+
+ public FSDataOutputStream create(String path, short replication, int bufferSize, Progressable progress) throws IOException {
+ // besides path/overwrite, the other args don't matter for
+ // testing purposes.
+ return localFS.create(new Path(path));
+ }
+
+ public FSDataInputStream open(String path, int bufferSize) throws IOException {
+ return localFS.open(new Path(path));
+ }
+
+
+};
diff --git a/src/test/core/org/apache/hadoop/fs/kfs/TestKosmosFileSystem.java b/src/test/core/org/apache/hadoop/fs/kfs/TestKosmosFileSystem.java
new file mode 100644
index 0000000000..c853f2af3f
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/kfs/TestKosmosFileSystem.java
@@ -0,0 +1,204 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * @author: Sriram Rao (Kosmix Corp.)
+ *
+ * Unit tests for the KosmosFileSystem API implementation.
+ */
+
+package org.apache.hadoop.fs.kfs;
+
+import java.io.*;
+import java.net.*;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+
+import org.apache.hadoop.fs.kfs.KosmosFileSystem;
+
+public class TestKosmosFileSystem extends TestCase {
+
+ KosmosFileSystem kosmosFileSystem;
+ KFSEmulationImpl kfsEmul;
+ Path baseDir;
+
+ @Override
+ protected void setUp() throws IOException {
+ Configuration conf = new Configuration();
+
+ kfsEmul = new KFSEmulationImpl(conf);
+ kosmosFileSystem = new KosmosFileSystem(kfsEmul);
+ // a dummy URI; we are not connecting to any setup here
+ kosmosFileSystem.initialize(URI.create("kfs:///"), conf);
+ baseDir = new Path(System.getProperty("test.build.data", "/tmp" ) +
+ "/kfs-test");
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+
+ }
+
+ // @Test
+ // Check all the directory API's in KFS
+ public void testDirs() throws Exception {
+ Path subDir1 = new Path("dir.1");
+
+ // make the dir
+ kosmosFileSystem.mkdirs(baseDir);
+ assertTrue(kosmosFileSystem.isDirectory(baseDir));
+ kosmosFileSystem.setWorkingDirectory(baseDir);
+
+ kosmosFileSystem.mkdirs(subDir1);
+ assertTrue(kosmosFileSystem.isDirectory(subDir1));
+
+ assertFalse(kosmosFileSystem.exists(new Path("test1")));
+ assertFalse(kosmosFileSystem.isDirectory(new Path("test/dir.2")));
+
+ FileStatus[] p = kosmosFileSystem.listStatus(baseDir);
+ assertEquals(p.length, 1);
+
+ kosmosFileSystem.delete(baseDir, true);
+ assertFalse(kosmosFileSystem.exists(baseDir));
+ }
+
+ // @Test
+ // Check the file API's
+ public void testFiles() throws Exception {
+ Path subDir1 = new Path("dir.1");
+ Path file1 = new Path("dir.1/foo.1");
+ Path file2 = new Path("dir.1/foo.2");
+
+ kosmosFileSystem.mkdirs(baseDir);
+ assertTrue(kosmosFileSystem.isDirectory(baseDir));
+ kosmosFileSystem.setWorkingDirectory(baseDir);
+
+ kosmosFileSystem.mkdirs(subDir1);
+
+ FSDataOutputStream s1 = kosmosFileSystem.create(file1, true, 4096, (short) 1, (long) 4096, null);
+ FSDataOutputStream s2 = kosmosFileSystem.create(file2, true, 4096, (short) 1, (long) 4096, null);
+
+ s1.close();
+ s2.close();
+
+ FileStatus[] p = kosmosFileSystem.listStatus(subDir1);
+ assertEquals(p.length, 2);
+
+ kosmosFileSystem.delete(file1, true);
+ p = kosmosFileSystem.listStatus(subDir1);
+ assertEquals(p.length, 1);
+
+ kosmosFileSystem.delete(file2, true);
+ p = kosmosFileSystem.listStatus(subDir1);
+ assertEquals(p.length, 0);
+
+ kosmosFileSystem.delete(baseDir, true);
+ assertFalse(kosmosFileSystem.exists(baseDir));
+ }
+
+ // @Test
+ // Check file/read write
+ public void testFileIO() throws Exception {
+ Path subDir1 = new Path("dir.1");
+ Path file1 = new Path("dir.1/foo.1");
+
+ kosmosFileSystem.mkdirs(baseDir);
+ assertTrue(kosmosFileSystem.isDirectory(baseDir));
+ kosmosFileSystem.setWorkingDirectory(baseDir);
+
+ kosmosFileSystem.mkdirs(subDir1);
+
+ FSDataOutputStream s1 = kosmosFileSystem.create(file1, true, 4096, (short) 1, (long) 4096, null);
+
+ int bufsz = 4096;
+ byte[] data = new byte[bufsz];
+
+ for (int i = 0; i < data.length; i++)
+ data[i] = (byte) (i % 16);
+
+ // write 4 bytes and read them back; read API should return a byte per call
+ s1.write(32);
+ s1.write(32);
+ s1.write(32);
+ s1.write(32);
+ // write some data
+ s1.write(data, 0, data.length);
+ // flush out the changes
+ s1.close();
+
+ // Read the stuff back and verify it is correct
+ FSDataInputStream s2 = kosmosFileSystem.open(file1, 4096);
+ int v;
+ long nread = 0;
+
+ v = s2.read();
+ assertEquals(v, 32);
+ v = s2.read();
+ assertEquals(v, 32);
+ v = s2.read();
+ assertEquals(v, 32);
+ v = s2.read();
+ assertEquals(v, 32);
+
+ assertEquals(s2.available(), data.length);
+
+ byte[] buf = new byte[bufsz];
+ s2.read(buf, 0, buf.length);
+ nread = s2.getPos();
+
+ for (int i = 0; i < data.length; i++)
+ assertEquals(data[i], buf[i]);
+
+ assertEquals(s2.available(), 0);
+
+ s2.close();
+
+ // append some data to the file
+ try {
+ s1 = kosmosFileSystem.append(file1);
+ for (int i = 0; i < data.length; i++)
+ data[i] = (byte) (i % 17);
+ // write the data
+ s1.write(data, 0, data.length);
+ // flush out the changes
+ s1.close();
+
+ // read it back and validate
+ s2 = kosmosFileSystem.open(file1, 4096);
+ s2.seek(nread);
+ s2.read(buf, 0, buf.length);
+ for (int i = 0; i < data.length; i++)
+ assertEquals(data[i], buf[i]);
+
+ s2.close();
+ } catch (Exception e) {
+ System.out.println("append isn't supported by the underlying fs");
+ }
+
+ kosmosFileSystem.delete(file1, true);
+ assertFalse(kosmosFileSystem.exists(file1));
+ kosmosFileSystem.delete(subDir1, true);
+ assertFalse(kosmosFileSystem.exists(subDir1));
+ kosmosFileSystem.delete(baseDir, true);
+ assertFalse(kosmosFileSystem.exists(baseDir));
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/fs/loadGenerator/DataGenerator.java b/src/test/core/org/apache/hadoop/fs/loadGenerator/DataGenerator.java
new file mode 100644
index 0000000000..4825bbada5
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/loadGenerator/DataGenerator.java
@@ -0,0 +1,160 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.loadGenerator;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+/**
+ * This program reads the directory structure and file structure from
+ * the input directory and creates the namespace in the file system
+ * specified by the configuration in the specified root.
+ * All the files are filled with 'a'.
+ *
+ * The synopsis of the command is
+ * java DataGenerator
+ * -inDir <inDirName> : input directory name where directory/file structures
+ * are stored. Its default value is the current directory.
+ * -root <root> : the name of the root directory which the new namespace
+ * is going to be placed under.
+ * Its default value is "/testLoadSpace".
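+ *
+ * For example (with hypothetical paths):
+ * java DataGenerator -inDir ./structure -root /testLoadSpace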
+ */
+public class DataGenerator extends Configured implements Tool {
+ private File inDir = StructureGenerator.DEFAULT_STRUCTURE_DIRECTORY;
+ private Path root = DEFAULT_ROOT;
+ private FileSystem fs;
+ final static private long BLOCK_SIZE = 10;
+ final static private String USAGE = "java DataGenerator " +
+ "-inDir " +
+ "-root ";
+
+ /** default name of the root where the test namespace will be placed under */
+ final static Path DEFAULT_ROOT = new Path("/testLoadSpace");
+
+ /** Main function.
+ * It first parses the command line arguments.
+ * It then reads the directory structure from the input directory
+ * structure file and creates directory structure in the file system
+ * namespace. Afterwards it reads the file attributes and creates files
+ * in the file system. All file content is filled with 'a'.
+ */
+ public int run(String[] args) throws Exception {
+ int exitCode = 0;
+ exitCode = init(args);
+ if (exitCode != 0) {
+ return exitCode;
+ }
+ genDirStructure();
+ genFiles();
+ return exitCode;
+ }
+
+ /** Parse the command line arguments and initialize the data */
+ private int init(String[] args) {
+ try { // initialize file system handle
+ fs = FileSystem.get(getConf());
+ } catch (IOException ioe) {
+ System.err.println("Can not initialize the file system: " +
+ ioe.getLocalizedMessage());
+ return -1;
+ }
+
+ for (int i = 0; i < args.length; i++) { // parse command line
+ if (args[i].equals("-root")) {
+ root = new Path(args[++i]);
+ } else if (args[i].equals("-inDir")) {
+ inDir = new File(args[++i]);
+ } else {
+ System.err.println(USAGE);
+ ToolRunner.printGenericCommandUsage(System.err);
+ System.exit(-1);
+ }
+ }
+ return 0;
+ }
+
+ /** Read directory structure file under the input directory.
+ * Create each directory under the specified root.
+ * The directory names are relative to the specified root.
+ */
+ private void genDirStructure() throws IOException {
+ BufferedReader in = new BufferedReader(
+ new FileReader(new File(inDir,
+ StructureGenerator.DIR_STRUCTURE_FILE_NAME)));
+ String line;
+ while ((line=in.readLine()) != null) {
+ fs.mkdirs(new Path(root+line));
+ }
+ }
+
+ /** Read file structure file under the input directory.
+ * Create each file under the specified root.
+ * The file names are relative to the root.
+ */
+ private void genFiles() throws IOException {
+ BufferedReader in = new BufferedReader(
+ new FileReader(new File(inDir,
+ StructureGenerator.FILE_STRUCTURE_FILE_NAME)));
+ String line;
+ while ((line=in.readLine()) != null) {
+ String[] tokens = line.split(" ");
+ if (tokens.length != 2) {
+ throw new IOException("Expect at most 2 tokens per line: " + line);
+ }
+ String fileName = root+tokens[0];
+ long fileSize = (long)(BLOCK_SIZE*Double.parseDouble(tokens[1]));
+ genFile(new Path(fileName), fileSize);
+ }
+ }
+
+ /** Create a file with the name <code>file</code> and
+ * a length of <code>fileSize</code>. The file is filled with character 'a'.
+ */
+ private void genFile(Path file, long fileSize) throws IOException {
+ FSDataOutputStream out = fs.create(file, true,
+ getConf().getInt("io.file.buffer.size", 4096),
+ (short)getConf().getInt("dfs.replication", 3),
+ fs.getDefaultBlockSize());
+ for(long i=0; i<fileSize; i++) {
+ out.writeByte('a');
+ }
+ out.close();
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java b/src/test/core/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
new file mode 100644
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
+package org.apache.hadoop.fs.loadGenerator;
+
+/**
+ * This program generates load on a file system by spawning client threads
+ * that issue a stream of read, write, and list requests.
+ *
+ * The synopsis of the command is
+ * java LoadGenerator
+ * -readProbability : read probability [0, 1]
+ * with a default value of 0.3333.
+ * -writeProbability : write probability [0, 1]
+ * with a default value of 0.3333.
+ * -root : test space with a default value of /testLoadSpace
+ * -maxDelayBetweenOps :
+ * Max delay in the unit of milliseconds between two operations with a
+ * default value of 0 indicating no delay.
+ * -numOfThreads :
+ * number of threads to spawn with a default value of 200.
+ * -elapsedTime :
+ * the elapsed time of program with a default value of 0
+ * indicating running forever
+ * -startTime : when the threads start to run.
+ * -scriptFile : text file to parse for scripted operation
+ */
+public class LoadGenerator extends Configured implements Tool {
+ public static final Log LOG = LogFactory.getLog(LoadGenerator.class);
+
+ private volatile boolean shouldRun = true;
+ private Path root = DataGenerator.DEFAULT_ROOT;
+ private FileSystem fs;
+ private int maxDelayBetweenOps = 0;
+ private int numOfThreads = 200;
+ private long [] durations = {0};
+ private double [] readProbs = {0.3333};
+ private double [] writeProbs = {0.3333};
+ private volatile int currentIndex = 0;
+ long totalTime = 0;
+ private long startTime = System.currentTimeMillis()+10000;
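+ // by default, start running 10 seconds from now so that several instances
+ // launched around the same time can line up at barrier()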
+ final static private int BLOCK_SIZE = 10;
+ private ArrayList<String> files = new ArrayList<String>(); // a table of file names
+ private ArrayList<String> dirs = new ArrayList<String>(); // a table of directory names
+ private Random r = null;
+ final private static String USAGE = "java LoadGenerator\n" +
+ "-readProbability \n" +
+ "-writeProbability \n" +
+ "-root \n" +
+ "-maxDelayBetweenOps \n" +
+ "-numOfThreads \n" +
+ "-elapsedTime \n" +
+ "-startTime \n" +
+ "-scriptFile ";
+ final private String hostname;
+
+ /** Constructor */
+ public LoadGenerator() throws IOException, UnknownHostException {
+ InetAddress addr = InetAddress.getLocalHost();
+ hostname = addr.getHostName();
+ }
+
+ private final static int OPEN = 0;
+ private final static int LIST = 1;
+ private final static int CREATE = 2;
+ private final static int WRITE_CLOSE = 3;
+ private final static int DELETE = 4;
+ private final static int TOTAL_OP_TYPES =5;
+ private long [] executionTime = new long[TOTAL_OP_TYPES];
+ private long [] totalNumOfOps = new long[TOTAL_OP_TYPES];
+
+ /** A thread sends a stream of requests to the NameNode.
+ * At each iteration, it first decides if it is going to read a file,
+ * create a file, or list a directory, following the read
+ * and write probabilities.
+ * When reading, it randomly picks a file in the test space and reads
+ * the entire file. When writing, it randomly picks a directory in the
+ * test space and creates a file whose name consists of the current
+ * machine's host name and the thread id. The length of the file
+ * follows Gaussian distribution with an average size of 2 blocks and
+ * the standard deviation of 1 block. The new file is filled with 'a'.
+ * Immediately after the file creation completes, the file is deleted
+ * from the test space.
+ * When listing, it randomly picks a directory in the test space and
+ * lists the directory content.
+ * Between two consecutive operations, the thread pauses for a random
+ * amount of time in the range of [0, maxDelayBetweenOps]
+ * if the specified max delay is not zero.
+ * A thread runs for the specified elapsed time if the time isn't zero.
+ * Otherwise, it runs forever.
+ */
+ private class DFSClientThread extends Thread {
+ private int id;
+ private long [] executionTime = new long[TOTAL_OP_TYPES];
+ private long [] totalNumOfOps = new long[TOTAL_OP_TYPES];
+ private byte[] buffer = new byte[1024];
+
+ private DFSClientThread(int id) {
+ this.id = id;
+ }
+
+ /** Main loop
+ * Each iteration decides what's the next operation and then pauses.
+ */
+ public void run() {
+ try {
+ while (shouldRun) {
+ nextOp();
+ delay();
+ }
+ } catch (Exception ioe) {
+ System.err.println(ioe.getLocalizedMessage());
+ ioe.printStackTrace();
+ }
+ }
+
+ /** Let the thread pause for a random amount of time in the range of
+ * [0, maxDelayBetweenOps] if the delay is not zero. Otherwise, no pause.
+ */
+ private void delay() throws InterruptedException {
+ if (maxDelayBetweenOps>0) {
+ int delay = r.nextInt(maxDelayBetweenOps);
+ Thread.sleep(delay);
+ }
+ }
+
+ /** Perform the next operation.
+ *
+ * Depending on the read and write probabilities, the next
+ * operation could be either read, write, or list.
+ */
+ private void nextOp() throws IOException {
+ double rn = r.nextDouble();
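+ // rn partitions [0, 1): [0, readProb) -> read,
+ // [readProb, readProb + writeProb) -> write, the remainder -> list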
+ int i = currentIndex;
+
+ if(LOG.isDebugEnabled())
+ LOG.debug("Thread " + this.id + " moving to index " + i);
+
+ if (rn < readProbs[i]) {
+ read();
+ } else if (rn < readProbs[i] + writeProbs[i]) {
+ write();
+ } else {
+ list();
+ }
+ }
+
+ /** Read operation randomly picks a file in the test space and reads
+ * the entire file */
+ private void read() throws IOException {
+ String fileName = files.get(r.nextInt(files.size()));
+ long startTime = System.currentTimeMillis();
+ InputStream in = fs.open(new Path(fileName));
+ executionTime[OPEN] += (System.currentTimeMillis()-startTime);
+ totalNumOfOps[OPEN]++;
+ while (in.read(buffer) != -1) {}
+ in.close();
+ }
+
+ /** The write operation randomly picks a directory in the
+ * test space and creates a file whose name consists of the current
+ * machine's host name and the thread id. The length of the file
+ * follows Gaussian distribution with an average size of 2 blocks and
+ * the standard deviation of 1 block. The new file is filled with 'a'.
+ * Immediately after the file creation completes, the file is deleted
+ * from the test space.
+ */
+ private void write() throws IOException {
+ String dirName = dirs.get(r.nextInt(dirs.size()));
+ Path file = new Path(dirName, hostname+id);
+ double fileSize = 0;
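+ // draw the file length in blocks from a Gaussian with a mean of 2 and a
+ // standard deviation of 1, rejecting non-positive samples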
+ while ((fileSize = r.nextGaussian()+2)<=0) {}
+ genFile(file, (long)(fileSize*BLOCK_SIZE));
+ long startTime = System.currentTimeMillis();
+ fs.delete(file, true);
+ executionTime[DELETE] += (System.currentTimeMillis()-startTime);
+ totalNumOfOps[DELETE]++;
+ }
+
+ /** The list operation randomly picks a directory in the test space and
+ * lists the directory content.
+ */
+ private void list() throws IOException {
+ String dirName = dirs.get(r.nextInt(dirs.size()));
+ long startTime = System.currentTimeMillis();
+ fs.listStatus(new Path(dirName));
+ executionTime[LIST] += (System.currentTimeMillis()-startTime);
+ totalNumOfOps[LIST]++;
+ }
+ }
+
+ /** Main function:
+ * It first initializes data by parsing the command line arguments.
+ * It then starts the number of DFSClient threads as specified by
+ * the user.
+ * It stops all the threads when the specified elapsed time is passed.
+ * Before exiting, it prints the average execution for
+ * each operation and operation throughput.
+ */
+ public int run(String[] args) throws Exception {
+ int exitCode = init(args);
+ if (exitCode != 0) {
+ return exitCode;
+ }
+
+ barrier();
+
+ DFSClientThread[] threads = new DFSClientThread[numOfThreads];
+ for (int i=0; i<numOfThreads; i++) {
+ threads[i] = new DFSClientThread(i);
+ threads[i].start();
+ }
+
+ if (durations[0] > 0) {
+ while(shouldRun) {
+ Thread.sleep(durations[currentIndex] * 1000);
+ totalTime += durations[currentIndex];
+
+ // Are we on the final line of the script?
+ if( (currentIndex + 1) == durations.length) {
+ shouldRun = false;
+ } else {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Moving to index " + currentIndex + ": r = "
+ + readProbs[currentIndex] + ", w = " + writeProbs[currentIndex]
+ + " for duration " + durations[currentIndex]);
+ }
+ currentIndex++;
+ }
+ }
+ }
+
+ LOG.debug("Done with testing. Waiting for threads to finish.");
+ for (DFSClientThread thread : threads) {
+ thread.join();
+ for (int i=0; i<TOTAL_OP_TYPES; i++) {
+ executionTime[i] += thread.executionTime[i];
+ totalNumOfOps[i] += thread.totalNumOfOps[i];
+ }
+ }
+ return exitCode;
+ }
+
+ /** Parse the command line arguments and initialize the data */
+ private int init(String[] args) throws IOException {
+ try { // initialize file system handle
+ fs = FileSystem.get(getConf());
+ } catch (IOException ioe) {
+ System.err.println("Can not initialize the file system: " +
+ ioe.getLocalizedMessage());
+ return -1;
+ }
+ int hostHashCode = hostname.hashCode();
+ boolean scriptSpecified = false;
+
+ try {
+ for (int i = 0; i < args.length; i++) { // parse command line
+ if (args[i].equals("-scriptFile")) {
+ if (loadScriptFile(args[++i]) == -1) {
+ return -1;
+ }
+ scriptSpecified = true;
+ } else if (args[i].equals("-readProbability")) {
+ if(scriptSpecified) {
+ System.err.println("Can't specify probabilities and use script.");
+ return -1;
+ }
+ readProbs[0] = Double.parseDouble(args[++i]);
+ if (readProbs[0] < 0 || readProbs[0] > 1) {
+ System.err.println(
+ "The read probability must be [0, 1]: " + readProbs[0]);
+ return -1;
+ }
+ } else if (args[i].equals("-writeProbability")) {
+ if(scriptSpecified) {
+ System.err.println("Can't specify probabilities and use script.");
+ return -1;
+ }
+ writeProbs[0] = Double.parseDouble(args[++i]);
+ if (writeProbs[0] < 0 || writeProbs[0] > 1) {
+ System.err.println(
+ "The write probability must be [0, 1]: " + writeProbs[0]);
+ return -1;
+ }
+ } else if (args[i].equals("-root")) {
+ root = new Path(args[++i]);
+ } else if (args[i].equals("-maxDelayBetweenOps")) {
+ maxDelayBetweenOps = Integer.parseInt(args[++i]); // in milliseconds
+ } else if (args[i].equals("-numOfThreads")) {
+ numOfThreads = Integer.parseInt(args[++i]);
+ if (numOfThreads <= 0) {
+ System.err.println(
+ "Number of threads must be positive: " + numOfThreads);
+ return -1;
+ }
+ } else if (args[i].equals("-startTime")) {
+ startTime = Long.parseLong(args[++i]);
+ } else if (args[i].equals("-elapsedTime")) {
+ if(scriptSpecified) {
+ System.err.println("Can't specify elapsedTime and use script.");
+ return -1;
+ }
+ durations[0] = Long.parseLong(args[++i]);
+ } else if (args[i].equals("-seed")) {
+ r = new Random(Long.parseLong(args[++i])+hostHashCode);
+ } else {
+ System.err.println(USAGE);
+ ToolRunner.printGenericCommandUsage(System.err);
+ return -1;
+ }
+ }
+ } catch (NumberFormatException e) {
+ System.err.println("Illegal parameter: " + e.getLocalizedMessage());
+ System.err.println(USAGE);
+ return -1;
+ }
+
+ for(int i = 0; i < readProbs.length; i++) {
+ if (readProbs[i] + writeProbs[i] <0 || readProbs[i]+ writeProbs[i] > 1) {
+ System.err.println(
+ "The sum of read probability and write probability must be [0, 1]: "
+ + readProbs[i] + " " + writeProbs[i]);
+ return -1;
+ }
+ }
+
+ if (r==null) {
+ r = new Random(System.currentTimeMillis()+hostHashCode);
+ }
+
+ return initFileDirTables();
+ }
+
+ /**
+ * Read a script file of the form: lines of text with duration in seconds,
+ * read probability and write probability, separated by white space.
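+ * For example, the (hypothetical) line "300 0.3 0.6" runs the generator for
+ * 300 seconds with a read probability of 0.3 and a write probability of 0.6.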
+ *
+ * @param filename Script file
+ * @return 0 if successful, -1 if not
+ * @throws IOException if errors with file IO
+ */
+ private int loadScriptFile(String filename) throws IOException {
+ FileReader fr = new FileReader(new File(filename));
+ BufferedReader br = new BufferedReader(fr);
+ ArrayList<Long> duration = new ArrayList<Long>();
+ ArrayList<Double> readProb = new ArrayList<Double>();
+ ArrayList<Double> writeProb = new ArrayList<Double>();
+ int lineNum = 0;
+
+ String line;
+ // Read script, parse values, build array of duration, read and write probs
+ while((line = br.readLine()) != null) {
+ lineNum++;
+ if(line.startsWith("#") || line.isEmpty()) // skip comments and blanks
+ continue;
+
+ String[] a = line.split("\\s");
+ if(a.length != 3) {
+ System.err.println("Line " + lineNum +
+ ": Incorrect number of parameters: " + line);
+ }
+
+ try {
+ long d = Long.parseLong(a[0]);
+ if(d < 0) {
+ System.err.println("Line " + lineNum + ": Invalid duration: " + d);
+ return -1;
+ }
+
+ double r = Double.parseDouble(a[1]);
+ if(r < 0.0 || r > 1.0 ) {
+ System.err.println("Line " + lineNum +
+ ": The read probability must be [0, 1]: " + r);
+ return -1;
+ }
+
+ double w = Double.parseDouble(a[2]);
+ if(w < 0.0 || w > 1.0) {
+ System.err.println("Line " + lineNum +
+ ": The write probability must be [0, 1]: " + w);
+ return -1;
+ }
+
+ readProb.add(r);
+ duration.add(d);
+ writeProb.add(w);
+ } catch( NumberFormatException nfe) {
+ System.err.println(lineNum + ": Can't parse: " + line);
+ return -1;
+ }
+ }
+
+ br.close();
+ fr.close();
+
+ // Copy vectors to arrays of values, to avoid autoboxing overhead later
+ durations = new long[duration.size()];
+ readProbs = new double[readProb.size()];
+ writeProbs = new double[writeProb.size()];
+
+ for(int i = 0; i < durations.length; i++) {
+ durations[i] = duration.get(i);
+ readProbs[i] = readProb.get(i);
+ writeProbs[i] = writeProb.get(i);
+ }
+
+ if(durations[0] == 0)
+ System.err.println("Initial duration set to 0. " +
+ "Will loop until stopped manually.");
+
+ return 0;
+ }
+
+ /** Create a table that contains all directories under root and
+ * another table that contains all files under root.
+ */
+ private int initFileDirTables() {
+ try {
+ initFileDirTables(root);
+ } catch (IOException e) {
+ System.err.println(e.getLocalizedMessage());
+ e.printStackTrace();
+ return -1;
+ }
+ if (dirs.isEmpty()) {
+ System.err.println("The test space " + root + " is empty");
+ return -1;
+ }
+ if (files.isEmpty()) {
+ System.err.println("The test space " + root +
+ " does not have any file");
+ return -1;
+ }
+ return 0;
+ }
+
+ /** Create a table that contains all directories under the specified path and
+ * another table that contains all files under the specified path and
+ * whose name starts with "_file_".
+ */
+ private void initFileDirTables(Path path) throws IOException {
+ FileStatus[] stats = fs.listStatus(path);
+ if (stats != null) {
+ for (FileStatus stat : stats) {
+ if (stat.isDir()) {
+ dirs.add(stat.getPath().toString());
+ initFileDirTables(stat.getPath());
+ } else {
+ Path filePath = stat.getPath();
+ if (filePath.getName().startsWith(StructureGenerator.FILE_NAME_PREFIX)) {
+ files.add(filePath.toString());
+ }
+ }
+ }
+ }
+ }
+
+ /** Returns when the current number of seconds from the epoch equals
+ * the command line argument given by <code>-startTime</code>.
+ * This allows multiple instances of this program, running on clock
+ * synchronized nodes, to start at roughly the same time.
+ */
+ private void barrier() {
+ long sleepTime;
+ while ((sleepTime = startTime - System.currentTimeMillis()) > 0) {
+ try {
+ Thread.sleep(sleepTime);
+ } catch (InterruptedException ex) {
+ }
+ }
+ }
+
+ /** Create a file with a length of <code>fileSize</code>.
+ * The file is filled with 'a'.
+ */
+ private void genFile(Path file, long fileSize) throws IOException {
+ long startTime = System.currentTimeMillis();
+ FSDataOutputStream out = fs.create(file, true,
+ getConf().getInt("io.file.buffer.size", 4096),
+ (short)getConf().getInt("dfs.replication", 3),
+ fs.getDefaultBlockSize());
+ executionTime[CREATE] += (System.currentTimeMillis()-startTime);
+ totalNumOfOps[CREATE]++;
+
+ for (long i=0; i<fileSize; i++) {
+ out.writeByte('a');
+ }
+ startTime = System.currentTimeMillis();
+ out.close();
+ executionTime[WRITE_CLOSE] += (System.currentTimeMillis()-startTime);
+ totalNumOfOps[WRITE_CLOSE]++;
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/fs/loadGenerator/StructureGenerator.java b/src/test/core/org/apache/hadoop/fs/loadGenerator/StructureGenerator.java
new file mode 100644
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/loadGenerator/StructureGenerator.java
+package org.apache.hadoop.fs.loadGenerator;
+
+/**
+ * This program generates a random directory and file structure and writes it
+ * out, so that DataGenerator can later materialize it in a file system.
+ *
+ * The synopsis of the command is
+ * java StructureGenerator
+ * -maxDepth <maxDepth> : maximum depth of the directory tree; default is 5.
+ * -minWidth <minWidth> : minimum number of subdirectories per directory; default is 1
+ * -maxWidth <maxWidth> : maximum number of subdirectories per directory; default is 5
+ * -numOfFiles <#OfFiles> : the total number of files; default is 10.
+ * -avgFileSize <avgFileSizeInBlocks> : average file size in blocks; default is 1.
+ * -outDir <outDir> : output directory; default is the current directory.
+ * -seed <seed> : random number generator seed; default is the current time.
+ */
+public class StructureGenerator {
+ private int maxDepth = 5;
+ private int minWidth = 1;
+ private int maxWidth = 5;
+ private int numOfFiles = 10;
+ private double avgFileSize = 1;
+ private File outDir = DEFAULT_STRUCTURE_DIRECTORY;
+ final static private String USAGE = "java StructureGenerator\n" +
+ "-maxDepth \n" +
+ "-minWidth \n" +
+ "-maxWidth \n" +
+ "-numOfFiles <#OfFiles>\n" +
+ "-avgFileSize \n" +
+ "-outDir \n" +
+ "-seed ";
+
+ private Random r = null;
+
+ /** Default directory for storing file/directory structure */
+ final static File DEFAULT_STRUCTURE_DIRECTORY = new File(".");
+ /** The name of the file for storing directory structure */
+ final static String DIR_STRUCTURE_FILE_NAME = "dirStructure";
+ /** The name of the file for storing file structure */
+ final static String FILE_STRUCTURE_FILE_NAME = "fileStructure";
+ /** The name prefix for the files created by this program */
+ final static String FILE_NAME_PREFIX = "_file_";
+
+ /**
+ * The main function first parses the command line arguments,
+ * then generates in-memory directory structure and outputs to a file,
+ * last generates in-memory files and outputs them to a file.
+ */
+ public int run(String[] args) throws Exception {
+ int exitCode = 0;
+ exitCode = init(args);
+ if (exitCode != 0) {
+ return exitCode;
+ }
+ genDirStructure();
+ output(new File(outDir, DIR_STRUCTURE_FILE_NAME));
+ genFileStructure();
+ outputFiles(new File(outDir, FILE_STRUCTURE_FILE_NAME));
+ return exitCode;
+ }
+
+ /** Parse the command line arguments and initialize the data */
+ private int init(String[] args) {
+ try {
+ for (int i = 0; i < args.length; i++) { // parse command line
+ if (args[i].equals("-maxDepth")) {
+ maxDepth = Integer.parseInt(args[++i]);
+ if (maxDepth<1) {
+ System.err.println("maxDepth must be positive: " + maxDepth);
+ return -1;
+ }
+ } else if (args[i].equals("-minWidth")) {
+ minWidth = Integer.parseInt(args[++i]);
+ if (minWidth<0) {
+ System.err.println("minWidth must be positive: " + minWidth);
+ return -1;
+ }
+ } else if (args[i].equals("-maxWidth")) {
+ maxWidth = Integer.parseInt(args[++i]);
+ } else if (args[i].equals("-numOfFiles")) {
+ numOfFiles = Integer.parseInt(args[++i]);
+ if (numOfFiles<1) {
+ System.err.println("NumOfFiles must be positive: " + numOfFiles);
+ return -1;
+ }
+ } else if (args[i].equals("-avgFileSize")) {
+ avgFileSize = Double.parseDouble(args[++i]);
+ if (avgFileSize<=0) {
+ System.err.println("AvgFileSize must be positive: " + avgFileSize);
+ return -1;
+ }
+ } else if (args[i].equals("-outDir")) {
+ outDir = new File(args[++i]);
+ } else if (args[i].equals("-seed")) {
+ r = new Random(Long.parseLong(args[++i]));
+ } else {
+ System.err.println(USAGE);
+ ToolRunner.printGenericCommandUsage(System.err);
+ return -1;
+ }
+ }
+ } catch (NumberFormatException e) {
+ System.err.println("Illegal parameter: " + e.getLocalizedMessage());
+ System.err.println(USAGE);
+ return -1;
+ }
+
+ if (maxWidth < minWidth) {
+ System.err.println(
+ "maxWidth must be bigger than minWidth: " + maxWidth);
+ return -1;
+ }
+
+ if (r==null) {
+ r = new Random();
+ }
+ return 0;
+ }
+
+ /** In memory representation of a directory */
+ private static class INode {
+ private String name;
+ private List<INode> children = new ArrayList<INode>();
+
+ /** Constructor */
+ private INode(String name) {
+ this.name = name;
+ }
+
+ /** Add a child (subdir/file) */
+ private void addChild(INode child) {
+ children.add(child);
+ }
+
+ /** Output the subtree rooted at the current node.
+ * Only the leaves are printed.
+ */
+ private void output(PrintStream out, String prefix) {
+ prefix = prefix==null?name:prefix+"/"+name;
+ if (children.isEmpty()) {
+ out.println(prefix);
+ } else {
+ for (INode child : children) {
+ child.output(out, prefix);
+ }
+ }
+ }
+
+ /** Output the files in the subtree rooted at this node */
+ protected void outputFiles(PrintStream out, String prefix) {
+ prefix = prefix==null?name:prefix+"/"+name;
+ for (INode child : children) {
+ child.outputFiles(out, prefix);
+ }
+ }
+
+ /** Add all the leaves in the subtree to the input list */
+ private void getLeaves(List<INode> leaves) {
+ if (children.isEmpty()) {
+ leaves.add(this);
+ } else {
+ for (INode child : children) {
+ child.getLeaves(leaves);
+ }
+ }
+ }
+ }
+
+ /** In memory representation of a file */
+ private static class FileINode extends INode {
+ private double numOfBlocks;
+
+ /** constructor */
+ private FileINode(String name, double numOfBlocks) {
+ super(name);
+ this.numOfBlocks = numOfBlocks;
+ }
+
+ /** Output a file attribute */
+ protected void outputFiles(PrintStream out, String prefix) {
+ prefix = (prefix == null)?super.name: prefix + "/"+super.name;
+ out.println(prefix + " " + numOfBlocks);
+ }
+ }
+
+ private INode root;
+
+ /** Generates a directory tree with a max depth of <code>maxDepth</code> */
+ private void genDirStructure() {
+ root = genDirStructure("", maxDepth);
+ }
+
+ /** Generate a directory tree rooted at <code>rootName</code>.
+ * The number of subtrees is in the range of [minWidth, maxWidth].
+ * The maximum depth of each subtree is in the range of
+ * [2*maxDepth/3, maxDepth].
+ */
+ private INode genDirStructure(String rootName, int maxDepth) {
+ INode root = new INode(rootName);
+
+ if (maxDepth>0) {
+ maxDepth--;
+ int minDepth = maxDepth*2/3;
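+ // each subtree's depth is drawn from [minDepth, maxDepth],
+ // i.e. roughly [2*maxDepth/3, maxDepth] as described above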
+ // Figure out the number of subdirectories to generate
+ int numOfSubDirs = minWidth + r.nextInt(maxWidth-minWidth+1);
+ // Expand the tree
+ for (int i=0; i<numOfSubDirs; i++) {
+ int childDepth = (maxDepth == 0) ? 0 :
+ (r.nextInt(maxDepth-minDepth+1)+minDepth);
+ INode child = genDirStructure("dir"+i, childDepth);
+ root.addChild(child);
+ }
+ }
+ return root;
+ }
+
+ /** Returns all leaf directories, i.e. the empty directories in the tree. */
+ private List<INode> getLeaves() {
+ List<INode> leaveDirs = new ArrayList<INode>();
+ root.getLeaves(leaveDirs);
+ return leaveDirs;
+ }
+
+ /** Decides where to place all the files and its length.
+ * It first collects all empty directories in the tree.
+ * For each file, it randomly chooses an empty directory to place the file.
+ * The file's length is generated using Gaussian distribution.
+ */
+ private void genFileStructure() {
+ List leaves = getLeaves();
+ int totalLeaves = leaves.size();
+ for (int i=0; i<numOfFiles; i++) {
+ double fileSize;
+ do {
+ fileSize = r.nextGaussian()+avgFileSize;
+ } while (fileSize<0);
+ leaves.get(r.nextInt(totalLeaves)).addChild(
+ new FileINode(FILE_NAME_PREFIX+i, fileSize));
+ }
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/fs/s3/InMemoryFileSystemStore.java b/src/test/core/org/apache/hadoop/fs/s3/InMemoryFileSystemStore.java
new file mode 100644
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/s3/InMemoryFileSystemStore.java
+package org.apache.hadoop.fs.s3;
+
+/**
+ * A stub implementation of FileSystemStore that keeps inodes and blocks in
+ * memory, so that the S3 file system code can be tested without connecting
+ * to S3.
+ */
+class InMemoryFileSystemStore implements FileSystemStore {
+
+ private Configuration conf;
+ private SortedMap<Path, INode> inodes = new TreeMap<Path, INode>();
+ private Map<Long, byte[]> blocks = new HashMap<Long, byte[]>();
+
+ public void initialize(URI uri, Configuration conf) {
+ this.conf = conf;
+ }
+
+ public String getVersion() throws IOException {
+ return "0";
+ }
+
+ public void deleteINode(Path path) throws IOException {
+ inodes.remove(normalize(path));
+ }
+
+ public void deleteBlock(Block block) throws IOException {
+ blocks.remove(block.getId());
+ }
+
+ public boolean inodeExists(Path path) throws IOException {
+ return inodes.containsKey(normalize(path));
+ }
+
+ public boolean blockExists(long blockId) throws IOException {
+ return blocks.containsKey(blockId);
+ }
+
+ public INode retrieveINode(Path path) throws IOException {
+ return inodes.get(normalize(path));
+ }
+
+ public File retrieveBlock(Block block, long byteRangeStart) throws IOException {
+ byte[] data = blocks.get(block.getId());
+ File file = createTempFile();
+ BufferedOutputStream out = null;
+ try {
+ out = new BufferedOutputStream(new FileOutputStream(file));
+ out.write(data, (int) byteRangeStart, data.length - (int) byteRangeStart);
+ } finally {
+ if (out != null) {
+ out.close();
+ }
+ }
+ return file;
+ }
+
+ private File createTempFile() throws IOException {
+ File dir = new File(conf.get("fs.s3.buffer.dir"));
+ if (!dir.exists() && !dir.mkdirs()) {
+ throw new IOException("Cannot create S3 buffer directory: " + dir);
+ }
+ File result = File.createTempFile("test-", ".tmp", dir);
+ result.deleteOnExit();
+ return result;
+ }
+
+ public Set<Path> listSubPaths(Path path) throws IOException {
+ Path normalizedPath = normalize(path);
+ // This is inefficient but more than adequate for testing purposes.
+ Set<Path> subPaths = new LinkedHashSet<Path>();
+ for (Path p : inodes.tailMap(normalizedPath).keySet()) {
+ if (normalizedPath.equals(p.getParent())) {
+ subPaths.add(p);
+ }
+ }
+ return subPaths;
+ }
+
+ public Set<Path> listDeepSubPaths(Path path) throws IOException {
+ Path normalizedPath = normalize(path);
+ String pathString = normalizedPath.toUri().getPath();
+ if (!pathString.endsWith("/")) {
+ pathString += "/";
+ }
+ // This is inefficient but more than adequate for testing purposes.
+ Set<Path> subPaths = new LinkedHashSet<Path>();
+ for (Path p : inodes.tailMap(normalizedPath).keySet()) {
+ if (p.toUri().getPath().startsWith(pathString)) {
+ subPaths.add(p);
+ }
+ }
+ return subPaths;
+ }
+
+ public void storeINode(Path path, INode inode) throws IOException {
+ inodes.put(normalize(path), inode);
+ }
+
+ public void storeBlock(Block block, File file) throws IOException {
+ ByteArrayOutputStream out = new ByteArrayOutputStream();
+ byte[] buf = new byte[8192];
+ int numRead;
+ BufferedInputStream in = null;
+ try {
+ in = new BufferedInputStream(new FileInputStream(file));
+ while ((numRead = in.read(buf)) >= 0) {
+ out.write(buf, 0, numRead);
+ }
+ } finally {
+ if (in != null) {
+ in.close();
+ }
+ }
+ blocks.put(block.getId(), out.toByteArray());
+ }
+
+ private Path normalize(Path path) {
+ if (!path.isAbsolute()) {
+ throw new IllegalArgumentException("Path must be absolute: " + path);
+ }
+ return new Path(path.toUri().getPath());
+ }
+
+ public void purge() throws IOException {
+ inodes.clear();
+ blocks.clear();
+ }
+
+ public void dump() throws IOException {
+ StringBuilder sb = new StringBuilder(getClass().getSimpleName());
+ sb.append(", \n");
+ for (Map.Entry<Path, INode> entry : inodes.entrySet()) {
+ sb.append(entry.getKey()).append("\n");
+ INode inode = entry.getValue();
+ sb.append("\t").append(inode.getFileType()).append("\n");
+ if (inode.getFileType() == FileType.DIRECTORY) {
+ continue;
+ }
+ for (int j = 0; j < inode.getBlocks().length; j++) {
+ sb.append("\t").append(inode.getBlocks()[j]).append("\n");
+ }
+ }
+ System.out.println(sb);
+
+ System.out.println(inodes.keySet());
+ System.out.println(blocks.keySet());
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/fs/s3/Jets3tS3FileSystemContractTest.java b/src/test/core/org/apache/hadoop/fs/s3/Jets3tS3FileSystemContractTest.java
new file mode 100644
index 0000000000..53b3c03c41
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/s3/Jets3tS3FileSystemContractTest.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3;
+
+import java.io.IOException;
+
+public class Jets3tS3FileSystemContractTest
+ extends S3FileSystemContractBaseTest {
+
+ @Override
+ FileSystemStore getFileSystemStore() throws IOException {
+ return new Jets3tFileSystemStore();
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/fs/s3/S3FileSystemContractBaseTest.java b/src/test/core/org/apache/hadoop/fs/s3/S3FileSystemContractBaseTest.java
new file mode 100644
index 0000000000..8d6744a12a
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/s3/S3FileSystemContractBaseTest.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3;
+
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystemContractBaseTest;
+
+public abstract class S3FileSystemContractBaseTest
+ extends FileSystemContractBaseTest {
+
+ private FileSystemStore store;
+
+ abstract FileSystemStore getFileSystemStore() throws IOException;
+
+ @Override
+ protected void setUp() throws Exception {
+ Configuration conf = new Configuration();
+ store = getFileSystemStore();
+ fs = new S3FileSystem(store);
+ fs.initialize(URI.create(conf.get("test.fs.s3.name")), conf);
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ store.purge();
+ super.tearDown();
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/fs/s3/TestINode.java b/src/test/core/org/apache/hadoop/fs/s3/TestINode.java
new file mode 100644
index 0000000000..086a43eabc
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/s3/TestINode.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.fs.s3.INode.FileType;
+
+public class TestINode extends TestCase {
+
+ public void testSerializeFileWithSingleBlock() throws IOException {
+ Block[] blocks = { new Block(849282477840258181L, 128L) };
+ INode inode = new INode(FileType.FILE, blocks);
+
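+ // Expected serialized size: 1 byte for the file type, 4 bytes for the block
+ // count, and 16 bytes (the id and length longs) for the single block.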
+ assertEquals("Length", 1L + 4 + 16, inode.getSerializedLength());
+ InputStream in = inode.serialize();
+
+ INode deserialized = INode.deserialize(in);
+
+ assertEquals("FileType", inode.getFileType(), deserialized.getFileType());
+ Block[] deserializedBlocks = deserialized.getBlocks();
+ assertEquals("Length", 1, deserializedBlocks.length);
+ assertEquals("Id", blocks[0].getId(), deserializedBlocks[0].getId());
+ assertEquals("Length", blocks[0].getLength(), deserializedBlocks[0]
+ .getLength());
+
+ }
+
+ public void testSerializeDirectory() throws IOException {
+ INode inode = INode.DIRECTORY_INODE;
+ assertEquals("Length", 1L, inode.getSerializedLength());
+ InputStream in = inode.serialize();
+ INode deserialized = INode.deserialize(in);
+ assertSame(INode.DIRECTORY_INODE, deserialized);
+ }
+
+ public void testDeserializeNull() throws IOException {
+ assertNull(INode.deserialize(null));
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/fs/s3/TestInMemoryS3FileSystemContract.java b/src/test/core/org/apache/hadoop/fs/s3/TestInMemoryS3FileSystemContract.java
new file mode 100644
index 0000000000..5d66cf12c8
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/s3/TestInMemoryS3FileSystemContract.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3;
+
+import java.io.IOException;
+
+public class TestInMemoryS3FileSystemContract
+ extends S3FileSystemContractBaseTest {
+
+ @Override
+ FileSystemStore getFileSystemStore() throws IOException {
+ return new InMemoryFileSystemStore();
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/fs/s3/TestS3Credentials.java b/src/test/core/org/apache/hadoop/fs/s3/TestS3Credentials.java
new file mode 100644
index 0000000000..bcbf0dc607
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/s3/TestS3Credentials.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.s3;
+
+import java.net.URI;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+
+public class TestS3Credentials extends TestCase {
+ public void testInvalidHostnameWithUnderscores() throws Exception {
+ S3Credentials s3Credentials = new S3Credentials();
+ try {
+ s3Credentials.initialize(new URI("s3://a:b@c_d"), new Configuration());
+ fail("Should throw IllegalArgumentException");
+ } catch (IllegalArgumentException e) {
+ assertEquals("Invalid hostname in URI s3://a:b@c_d", e.getMessage());
+ }
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/fs/s3/TestS3FileSystem.java b/src/test/core/org/apache/hadoop/fs/s3/TestS3FileSystem.java
new file mode 100644
index 0000000000..f21989c5d9
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/s3/TestS3FileSystem.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3;
+
+import java.io.IOException;
+import java.net.URI;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+
+public class TestS3FileSystem extends TestCase {
+
+ public void testInitialization() throws IOException {
+ initializationTest("s3://a:b@c", "s3://a:b@c");
+ initializationTest("s3://a:b@c/", "s3://a:b@c");
+ initializationTest("s3://a:b@c/path", "s3://a:b@c");
+ initializationTest("s3://a@c", "s3://a@c");
+ initializationTest("s3://a@c/", "s3://a@c");
+ initializationTest("s3://a@c/path", "s3://a@c");
+ initializationTest("s3://c", "s3://c");
+ initializationTest("s3://c/", "s3://c");
+ initializationTest("s3://c/path", "s3://c");
+ }
+
+ private void initializationTest(String initializationUri, String expectedUri)
+ throws IOException {
+
+ S3FileSystem fs = new S3FileSystem(new InMemoryFileSystemStore());
+ fs.initialize(URI.create(initializationUri), new Configuration());
+ assertEquals(URI.create(expectedUri), fs.getUri());
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/fs/s3native/InMemoryNativeFileSystemStore.java b/src/test/core/org/apache/hadoop/fs/s3native/InMemoryNativeFileSystemStore.java
new file mode 100644
index 0000000000..d3086da9e8
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/s3native/InMemoryNativeFileSystemStore.java
@@ -0,0 +1,198 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3native;
+
+import static org.apache.hadoop.fs.s3native.NativeS3FileSystem.PATH_DELIMITER;
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.SortedMap;
+import java.util.SortedSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ *
+ * A stub implementation of {@link NativeFileSystemStore} for testing
+ * {@link NativeS3FileSystem} without actually connecting to S3.
+ *
+ */
+class InMemoryNativeFileSystemStore implements NativeFileSystemStore {
+
+ private Configuration conf;
+
+ private SortedMap<String, FileMetadata> metadataMap =
+ new TreeMap<String, FileMetadata>();
+ private SortedMap<String, byte[]> dataMap = new TreeMap<String, byte[]>();
+
+ public void initialize(URI uri, Configuration conf) throws IOException {
+ this.conf = conf;
+ }
+
+ public void storeEmptyFile(String key) throws IOException {
+ metadataMap.put(key, new FileMetadata(key, 0, System.currentTimeMillis()));
+ dataMap.put(key, new byte[0]);
+ }
+
+ public void storeFile(String key, File file, byte[] md5Hash)
+ throws IOException {
+
+ ByteArrayOutputStream out = new ByteArrayOutputStream();
+ byte[] buf = new byte[8192];
+ int numRead;
+ BufferedInputStream in = null;
+ try {
+ in = new BufferedInputStream(new FileInputStream(file));
+ while ((numRead = in.read(buf)) >= 0) {
+ out.write(buf, 0, numRead);
+ }
+ } finally {
+ if (in != null) {
+ in.close();
+ }
+ }
+ metadataMap.put(key,
+ new FileMetadata(key, file.length(), System.currentTimeMillis()));
+ dataMap.put(key, out.toByteArray());
+ }
+
+ public InputStream retrieve(String key) throws IOException {
+ return retrieve(key, 0);
+ }
+
+ public InputStream retrieve(String key, long byteRangeStart)
+ throws IOException {
+
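+ // Simulate a ranged read: copy everything from byteRangeStart onward into a
+ // temporary file and return a stream over that file.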
+ byte[] data = dataMap.get(key);
+ File file = createTempFile();
+ BufferedOutputStream out = null;
+ try {
+ out = new BufferedOutputStream(new FileOutputStream(file));
+ out.write(data, (int) byteRangeStart,
+ data.length - (int) byteRangeStart);
+ } finally {
+ if (out != null) {
+ out.close();
+ }
+ }
+ return new FileInputStream(file);
+ }
+
+ private File createTempFile() throws IOException {
+ File dir = new File(conf.get("fs.s3.buffer.dir"));
+ if (!dir.exists() && !dir.mkdirs()) {
+ throw new IOException("Cannot create S3 buffer directory: " + dir);
+ }
+ File result = File.createTempFile("test-", ".tmp", dir);
+ result.deleteOnExit();
+ return result;
+ }
+
+ public FileMetadata retrieveMetadata(String key) throws IOException {
+ return metadataMap.get(key);
+ }
+
+ public PartialListing list(String prefix, int maxListingLength)
+ throws IOException {
+ return list(prefix, maxListingLength, null);
+ }
+
+ public PartialListing list(String prefix, int maxListingLength,
+ String priorLastKey) throws IOException {
+
+ return list(prefix, PATH_DELIMITER, maxListingLength, priorLastKey);
+ }
+
+ public PartialListing listAll(String prefix, int maxListingLength,
+ String priorLastKey) throws IOException {
+
+ return list(prefix, null, maxListingLength, priorLastKey);
+ }
+
+ private PartialListing list(String prefix, String delimiter,
+ int maxListingLength, String priorLastKey) throws IOException {
+
+ if (prefix.length() > 0 && !prefix.endsWith(PATH_DELIMITER)) {
+ prefix += PATH_DELIMITER;
+ }
+
+ List<FileMetadata> metadata = new ArrayList<FileMetadata>();
+ SortedSet<String> commonPrefixes = new TreeSet<String>();
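+ // Mimic S3 listing semantics: keys containing the delimiter beyond the prefix
+ // are rolled up into common prefixes, the rest are returned as file entries.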
+ for (String key : dataMap.keySet()) {
+ if (key.startsWith(prefix)) {
+ if (delimiter == null) {
+ metadata.add(retrieveMetadata(key));
+ } else {
+ int delimIndex = key.indexOf(delimiter, prefix.length());
+ if (delimIndex == -1) {
+ metadata.add(retrieveMetadata(key));
+ } else {
+ String commonPrefix = key.substring(0, delimIndex);
+ commonPrefixes.add(commonPrefix);
+ }
+ }
+ }
+ if (metadata.size() + commonPrefixes.size() == maxListingLength) {
+ return new PartialListing(key, metadata.toArray(new FileMetadata[0]),
+ commonPrefixes.toArray(new String[0]));
+ }
+ }
+ return new PartialListing(null, metadata.toArray(new FileMetadata[0]),
+ commonPrefixes.toArray(new String[0]));
+ }
+
+ public void delete(String key) throws IOException {
+ metadataMap.remove(key);
+ dataMap.remove(key);
+ }
+
+ public void rename(String srcKey, String dstKey) throws IOException {
+ metadataMap.put(dstKey, metadataMap.remove(srcKey));
+ dataMap.put(dstKey, dataMap.remove(srcKey));
+ }
+
+ public void purge(String prefix) throws IOException {
+ Iterator<Entry<String, FileMetadata>> i =
+ metadataMap.entrySet().iterator();
+ while (i.hasNext()) {
+ Entry<String, FileMetadata> entry = i.next();
+ if (entry.getKey().startsWith(prefix)) {
+ dataMap.remove(entry.getKey());
+ i.remove();
+ }
+ }
+ }
+
+ public void dump() throws IOException {
+ System.out.println(metadataMap.values());
+ System.out.println(dataMap.keySet());
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/fs/s3native/Jets3tNativeS3FileSystemContractTest.java b/src/test/core/org/apache/hadoop/fs/s3native/Jets3tNativeS3FileSystemContractTest.java
new file mode 100644
index 0000000000..6516c836f8
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/s3native/Jets3tNativeS3FileSystemContractTest.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3native;
+
+import java.io.IOException;
+
+public class Jets3tNativeS3FileSystemContractTest
+ extends NativeS3FileSystemContractBaseTest {
+
+ @Override
+ NativeFileSystemStore getNativeFileSystemStore() throws IOException {
+ return new Jets3tNativeFileSystemStore();
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java b/src/test/core/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
new file mode 100644
index 0000000000..bf2e3c3d38
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3native;
+
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystemContractBaseTest;
+import org.apache.hadoop.fs.Path;
+
+public abstract class NativeS3FileSystemContractBaseTest
+ extends FileSystemContractBaseTest {
+
+ private NativeFileSystemStore store;
+
+ abstract NativeFileSystemStore getNativeFileSystemStore() throws IOException;
+
+ @Override
+ protected void setUp() throws Exception {
+ Configuration conf = new Configuration();
+ store = getNativeFileSystemStore();
+ fs = new NativeS3FileSystem(store);
+ fs.initialize(URI.create(conf.get("test.fs.s3n.name")), conf);
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ store.purge("test");
+ super.tearDown();
+ }
+
+ public void testListStatusForRoot() throws Exception {
+ Path testDir = path("/test");
+ assertTrue(fs.mkdirs(testDir));
+
+ FileStatus[] paths = fs.listStatus(path("/"));
+ assertEquals(1, paths.length);
+ assertEquals(path("/test"), paths[0].getPath());
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/fs/s3native/TestInMemoryNativeS3FileSystemContract.java b/src/test/core/org/apache/hadoop/fs/s3native/TestInMemoryNativeS3FileSystemContract.java
new file mode 100644
index 0000000000..664d39e6f4
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/s3native/TestInMemoryNativeS3FileSystemContract.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3native;
+
+import java.io.IOException;
+
+public class TestInMemoryNativeS3FileSystemContract
+ extends NativeS3FileSystemContractBaseTest {
+
+ @Override
+ NativeFileSystemStore getNativeFileSystemStore() throws IOException {
+ return new InMemoryNativeFileSystemStore();
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/http/TestGlobalFilter.java b/src/test/core/org/apache/hadoop/http/TestGlobalFilter.java
new file mode 100644
index 0000000000..51ab60697f
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/http/TestGlobalFilter.java
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.http;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.net.URL;
+import java.net.URLConnection;
+import java.util.Set;
+import java.util.TreeSet;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+
+public class TestGlobalFilter extends junit.framework.TestCase {
+ static final Log LOG = LogFactory.getLog(HttpServer.class);
+ static final Set<String> RECORDS = new TreeSet<String>();
+
+ /** A very simple filter that records accessed URIs. */
+ static public class RecordingFilter implements Filter {
+ private FilterConfig filterConfig = null;
+
+ public void init(FilterConfig filterConfig) {
+ this.filterConfig = filterConfig;
+ }
+
+ public void destroy() {
+ this.filterConfig = null;
+ }
+
+ public void doFilter(ServletRequest request, ServletResponse response,
+ FilterChain chain) throws IOException, ServletException {
+ if (filterConfig == null)
+ return;
+
+ String uri = ((HttpServletRequest)request).getRequestURI();
+ LOG.info("filtering " + uri);
+ RECORDS.add(uri);
+ chain.doFilter(request, response);
+ }
+
+ /** Configuration for RecordingFilter */
+ static public class Initializer extends FilterInitializer {
+ public Initializer() {}
+
+ void initFilter(FilterContainer container) {
+ container.addGlobalFilter("recording", RecordingFilter.class.getName(), null);
+ }
+ }
+ }
+
+
+ /** Access a URL, ignoring IOExceptions such as the page not existing. */
+ static void access(String urlstring) throws IOException {
+ LOG.warn("access " + urlstring);
+ URL url = new URL(urlstring);
+ URLConnection connection = url.openConnection();
+ connection.connect();
+
+ try {
+ BufferedReader in = new BufferedReader(new InputStreamReader(
+ connection.getInputStream()));
+ try {
+ for(; in.readLine() != null; );
+ } finally {
+ in.close();
+ }
+ } catch(IOException ioe) {
+ LOG.warn("urlstring=" + urlstring, ioe);
+ }
+ }
+
+ public void testServletFilter() throws Exception {
+ Configuration conf = new Configuration();
+
+ //start a http server with RecordingFilter
+ conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
+ RecordingFilter.Initializer.class.getName());
+ HttpServer http = new HttpServer("datanode", "localhost", 0, true, conf);
+ http.start();
+
+ final String fsckURL = "/fsck";
+ final String stacksURL = "/stacks";
+ final String ajspURL = "/a.jsp";
+ final String listPathsURL = "/listPaths";
+ final String dataURL = "/data";
+ final String streamFile = "/streamFile";
+ final String rootURL = "/";
+ final String allURL = "/*";
+ final String outURL = "/static/a.out";
+ final String logURL = "/logs/a.log";
+
+ final String[] urls = {fsckURL, stacksURL, ajspURL, listPathsURL,
+ dataURL, streamFile, rootURL, allURL, outURL, logURL};
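+ // The global filter should see every request path above, including static
+ // content, logs and pages that do not exist; the assertions at the end check
+ // that each accessed path was recorded.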
+
+ //access the urls
+ final String prefix = "http://localhost:" + http.getPort();
+ try {
+ for(int i = 0; i < urls.length; i++) {
+ access(prefix + urls[i]);
+ }
+ } finally {
+ http.stop();
+ }
+
+ LOG.info("RECORDS = " + RECORDS);
+
+ //verify records
+ for(int i = 0; i < urls.length; i++) {
+ assertTrue(RECORDS.remove(urls[i]));
+ }
+ assertTrue(RECORDS.isEmpty());
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/http/TestServletFilter.java b/src/test/core/org/apache/hadoop/http/TestServletFilter.java
new file mode 100644
index 0000000000..8052f9ad49
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/http/TestServletFilter.java
@@ -0,0 +1,138 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.http;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.net.URL;
+import java.net.URLConnection;
+import java.util.Random;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+
+public class TestServletFilter extends junit.framework.TestCase {
+ static final Log LOG = LogFactory.getLog(HttpServer.class);
+ static volatile String uri = null;
+
+ /** A very simple filter which records the URI it filters. */
+ static public class SimpleFilter implements Filter {
+ private FilterConfig filterConfig = null;
+
+ public void init(FilterConfig filterConfig) {
+ this.filterConfig = filterConfig;
+ }
+
+ public void destroy() {
+ this.filterConfig = null;
+ }
+
+ public void doFilter(ServletRequest request, ServletResponse response,
+ FilterChain chain) throws IOException, ServletException {
+ if (filterConfig == null)
+ return;
+
+ uri = ((HttpServletRequest)request).getRequestURI();
+ LOG.info("filtering " + uri);
+ chain.doFilter(request, response);
+ }
+
+ /** Configuration for the filter */
+ static public class Initializer extends FilterInitializer {
+ public Initializer() {}
+
+ void initFilter(FilterContainer container) {
+ container.addFilter("simple", SimpleFilter.class.getName(), null);
+ }
+ }
+ }
+
+
+ /** Access a URL, ignoring IOExceptions such as the page not existing. */
+ static void access(String urlstring) throws IOException {
+ LOG.warn("access " + urlstring);
+ URL url = new URL(urlstring);
+ URLConnection connection = url.openConnection();
+ connection.connect();
+
+ try {
+ BufferedReader in = new BufferedReader(new InputStreamReader(
+ connection.getInputStream()));
+ try {
+ for(; in.readLine() != null; );
+ } finally {
+ in.close();
+ }
+ } catch(IOException ioe) {
+ LOG.warn("urlstring=" + urlstring, ioe);
+ }
+ }
+
+ public void testServletFilter() throws Exception {
+ Configuration conf = new Configuration();
+
+ //start a http server with SimpleFilter
+ conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
+ SimpleFilter.Initializer.class.getName());
+ HttpServer http = new HttpServer("datanode", "localhost", 0, true, conf);
+ http.start();
+
+ final String fsckURL = "/fsck";
+ final String stacksURL = "/stacks";
+ final String ajspURL = "/a.jsp";
+ final String logURL = "/logs/a.log";
+ final String hadooplogoURL = "/static/hadoop-logo.jpg";
+
+ final String[] urls = {fsckURL, stacksURL, ajspURL, logURL, hadooplogoURL};
+ final Random ran = new Random();
+ final int[] sequence = new int[50];
+
+ //generate a random sequence and update counts
+ for(int i = 0; i < sequence.length; i++) {
+ sequence[i] = ran.nextInt(urls.length);
+ }
+
+ //access the urls as the sequence
+ final String prefix = "http://localhost:" + http.getPort();
+ try {
+ for(int i = 0; i < sequence.length; i++) {
+ access(prefix + urls[sequence[i]]);
+
+ //make sure everything except fsck gets filtered
+ if (sequence[i] == 0) {
+ assertEquals(null, uri);
+ } else {
+ assertEquals(urls[sequence[i]], uri);
+ uri = null;
+ }
+ }
+ } finally {
+ http.stop();
+ }
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/io/RandomDatum.java b/src/test/core/org/apache/hadoop/io/RandomDatum.java
new file mode 100644
index 0000000000..ab8f34feba
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/io/RandomDatum.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io;
+
+import java.util.*;
+import java.io.*;
+
+public class RandomDatum implements WritableComparable {
+ private int length;
+ private byte[] data;
+
+ public RandomDatum() {}
+
+ public RandomDatum(Random random) {
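+ // Pick a length between 11 and roughly 1010 bytes (the exponent is uniform,
+ // so lengths are roughly log-uniform) and fill the datum with random bytes.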
+ length = 10 + (int) Math.pow(10.0, random.nextFloat() * 3.0);
+ data = new byte[length];
+ random.nextBytes(data);
+ }
+
+ public int getLength() {
+ return length;
+ }
+
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(length);
+ out.write(data);
+ }
+
+ public void readFields(DataInput in) throws IOException {
+ length = in.readInt();
+ if (data == null || length > data.length)
+ data = new byte[length];
+ in.readFully(data, 0, length);
+ }
+
+ public int compareTo(Object o) {
+ RandomDatum that = (RandomDatum)o;
+ return WritableComparator.compareBytes(this.data, 0, this.length,
+ that.data, 0, that.length);
+ }
+
+ public boolean equals(Object o) {
+ return compareTo(o) == 0;
+ }
+
+ private static final char[] HEX_DIGITS =
+ {'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'};
+
+ /** Returns a string representation of this object. */
+ public String toString() {
+ StringBuffer buf = new StringBuffer(length*2);
+ for (int i = 0; i < length; i++) {
+ int b = data[i];
+ buf.append(HEX_DIGITS[(b >> 4) & 0xf]);
+ buf.append(HEX_DIGITS[b & 0xf]);
+ }
+ return buf.toString();
+ }
+
+ public static class Generator {
+ Random random;
+
+ private RandomDatum key;
+ private RandomDatum value;
+
+ public Generator() { random = new Random(); }
+ public Generator(int seed) { random = new Random(seed); }
+
+ public RandomDatum getKey() { return key; }
+ public RandomDatum getValue() { return value; }
+
+ public void next() {
+ key = new RandomDatum(random);
+ value = new RandomDatum(random);
+ }
+ }
+
+ /** A WritableComparator optimized for RandomDatum. */
+ public static class Comparator extends WritableComparator {
+ public Comparator() {
+ super(RandomDatum.class);
+ }
+
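+ // Compare serialized records directly: read the 4-byte length written by
+ // write(), then compare the raw data bytes that follow, with no deserialization.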
+ public int compare(byte[] b1, int s1, int l1,
+ byte[] b2, int s2, int l2) {
+ int n1 = readInt(b1, s1);
+ int n2 = readInt(b2, s2);
+ return compareBytes(b1, s1+4, n1, b2, s2+4, n2);
+ }
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/io/TestArrayFile.java b/src/test/core/org/apache/hadoop/io/TestArrayFile.java
new file mode 100644
index 0000000000..f279bd7431
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/io/TestArrayFile.java
@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io;
+
+import java.io.*;
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.*;
+
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.conf.*;
+
+ /** Unit tests for {@link ArrayFile}. */
+public class TestArrayFile extends TestCase {
+ private static final Log LOG = LogFactory.getLog(TestArrayFile.class);
+ private static String FILE =
+ System.getProperty("test.build.data",".") + "/test.array";
+
+ public TestArrayFile(String name) {
+ super(name);
+ }
+
+ public void testArrayFile() throws Exception {
+ Configuration conf = new Configuration();
+ FileSystem fs = FileSystem.getLocal(conf);
+ RandomDatum[] data = generate(10000);
+ writeTest(fs, data, FILE);
+ readTest(fs, data, FILE, conf);
+ }
+
+ public void testEmptyFile() throws Exception {
+ Configuration conf = new Configuration();
+ FileSystem fs = FileSystem.getLocal(conf);
+ writeTest(fs, new RandomDatum[0], FILE);
+ ArrayFile.Reader reader = new ArrayFile.Reader(fs, FILE, conf);
+ assertNull(reader.get(0, new RandomDatum()));
+ reader.close();
+ }
+
+ private static RandomDatum[] generate(int count) {
+ LOG.debug("generating " + count + " records in debug");
+ RandomDatum[] data = new RandomDatum[count];
+ RandomDatum.Generator generator = new RandomDatum.Generator();
+ for (int i = 0; i < count; i++) {
+ generator.next();
+ data[i] = generator.getValue();
+ }
+ return data;
+ }
+
+ private static void writeTest(FileSystem fs, RandomDatum[] data, String file)
+ throws IOException {
+ Configuration conf = new Configuration();
+ MapFile.delete(fs, file);
+ LOG.debug("creating with " + data.length + " debug");
+ ArrayFile.Writer writer = new ArrayFile.Writer(conf, fs, file, RandomDatum.class);
+ writer.setIndexInterval(100);
+ for (int i = 0; i < data.length; i++)
+ writer.append(data[i]);
+ writer.close();
+ }
+
+ private static void readTest(FileSystem fs, RandomDatum[] data, String file, Configuration conf)
+ throws IOException {
+ RandomDatum v = new RandomDatum();
+ LOG.debug("reading " + data.length + " debug");
+ ArrayFile.Reader reader = new ArrayFile.Reader(fs, file, conf);
+ for (int i = 0; i < data.length; i++) { // try forwards
+ reader.get(i, v);
+ if (!v.equals(data[i])) {
+ throw new RuntimeException("wrong value at " + i);
+ }
+ }
+ for (int i = data.length-1; i >= 0; i--) { // then backwards
+ reader.get(i, v);
+ if (!v.equals(data[i])) {
+ throw new RuntimeException("wrong value at " + i);
+ }
+ }
+ reader.close();
+ LOG.debug("done reading " + data.length + " debug");
+ }
+
+
+ /** For debugging and testing. */
+ public static void main(String[] args) throws Exception {
+ int count = 1024 * 1024;
+ boolean create = true;
+ boolean check = true;
+ String file = FILE;
+ String usage = "Usage: TestArrayFile [-count N] [-nocreate] [-nocheck] file";
+
+ if (args.length == 0) {
+ System.err.println(usage);
+ System.exit(-1);
+ }
+
+ Configuration conf = new Configuration();
+ int i = 0;
+ Path fpath = null;
+ FileSystem fs = null;
+ try {
+ for (; i < args.length; i++) { // parse command line
+ if (args[i] == null) {
+ continue;
+ } else if (args[i].equals("-count")) {
+ count = Integer.parseInt(args[++i]);
+ } else if (args[i].equals("-nocreate")) {
+ create = false;
+ } else if (args[i].equals("-nocheck")) {
+ check = false;
+ } else {
+ // file is required parameter
+ file = args[i];
+ fpath=new Path(file);
+ }
+ }
+
+ fs = fpath.getFileSystem(conf);
+
+ LOG.info("count = " + count);
+ LOG.info("create = " + create);
+ LOG.info("check = " + check);
+ LOG.info("file = " + file);
+
+ RandomDatum[] data = generate(count);
+
+ if (create) {
+ writeTest(fs, data, file);
+ }
+
+ if (check) {
+ readTest(fs, data, file, conf);
+ }
+ } finally {
+ fs.close();
+ }
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/io/TestArrayWritable.java b/src/test/core/org/apache/hadoop/io/TestArrayWritable.java
new file mode 100644
index 0000000000..47d0ce9f63
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/io/TestArrayWritable.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io;
+
+import java.io.*;
+
+import junit.framework.TestCase;
+
+/** Unit tests for ArrayWritable */
+public class TestArrayWritable extends TestCase {
+
+ static class TextArrayWritable extends ArrayWritable {
+ public TextArrayWritable() {
+ super(Text.class);
+ }
+ }
+
+ public TestArrayWritable(String name) {
+ super(name);
+ }
+
+ /**
+ * If valueClass is undefined, readFields should throw an exception indicating
+ * that the field is null. Otherwise, readFields should succeed.
+ */
+ public void testThrowUndefinedValueException() throws IOException {
+ // Get a buffer containing a simple text array
+ Text[] elements = {new Text("zero"), new Text("one"), new Text("two")};
+ TextArrayWritable sourceArray = new TextArrayWritable();
+ sourceArray.set(elements);
+
+ // Write it to a normal output buffer
+ DataOutputBuffer out = new DataOutputBuffer();
+ DataInputBuffer in = new DataInputBuffer();
+ sourceArray.write(out);
+
+ // Read the output buffer back into a TextArrayWritable. Since the valueClass
+ // is defined, this should succeed
+ TextArrayWritable destArray = new TextArrayWritable();
+ in.reset(out.getData(), out.getLength());
+ destArray.readFields(in);
+ Writable[] destElements = destArray.get();
+ assertTrue(destElements.length == elements.length);
+ for (int i = 0; i < elements.length; i++) {
+ assertEquals(destElements[i],elements[i]);
+ }
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/io/TestBloomMapFile.java b/src/test/core/org/apache/hadoop/io/TestBloomMapFile.java
new file mode 100644
index 0000000000..2a7d22455f
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/io/TestBloomMapFile.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+import junit.framework.TestCase;
+
+public class TestBloomMapFile extends TestCase {
+ private static Configuration conf = new Configuration();
+
+ public void testMembershipTest() throws Exception {
+ // write the file
+ Path dirName = new Path(System.getProperty("test.build.data",".") +
+ getName() + ".bloommapfile");
+ FileSystem fs = FileSystem.getLocal(conf);
+ Path qualifiedDirName = fs.makeQualified(dirName);
+ conf.setInt("io.mapfile.bloom.size", 2048);
+ BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, fs,
+ qualifiedDirName.toString(), IntWritable.class, Text.class);
+ IntWritable key = new IntWritable();
+ Text value = new Text();
+ for (int i = 0; i < 2000; i += 2) {
+ key.set(i);
+ value.set("00" + i);
+ writer.append(key, value);
+ }
+ writer.close();
+
+ BloomMapFile.Reader reader = new BloomMapFile.Reader(fs,
+ qualifiedDirName.toString(), conf);
+ // check false positives rate
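+ // A Bloom filter may claim a missing key is present (false positive) but must
+ // never miss a key that was actually written, so falseNeg is required to be 0.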
+ int falsePos = 0;
+ int falseNeg = 0;
+ for (int i = 0; i < 2000; i++) {
+ key.set(i);
+ boolean exists = reader.probablyHasKey(key);
+ if (i % 2 == 0) {
+ if (!exists) falseNeg++;
+ } else {
+ if (exists) falsePos++;
+ }
+ }
+ reader.close();
+ fs.delete(qualifiedDirName, true);
+ System.out.println("False negatives: " + falseNeg);
+ assertEquals(0, falseNeg);
+ System.out.println("False positives: " + falsePos);
+ assertTrue(falsePos < 2);
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/io/TestBytesWritable.java b/src/test/core/org/apache/hadoop/io/TestBytesWritable.java
new file mode 100644
index 0000000000..35e0d0ed82
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/io/TestBytesWritable.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io;
+
+import junit.framework.TestCase;
+
+/**
+ * This is the unit test for BytesWritable.
+ */
+public class TestBytesWritable extends TestCase {
+
+ public void testSizeChange() throws Exception {
+ byte[] hadoop = "hadoop".getBytes();
+ BytesWritable buf = new BytesWritable(hadoop);
+ int size = buf.getLength();
+ int orig_capacity = buf.getCapacity();
+ buf.setSize(size*2);
+ int new_capacity = buf.getCapacity();
+ System.arraycopy(buf.getBytes(), 0, buf.getBytes(), size, size);
+ assertTrue(new_capacity >= size * 2);
+ assertEquals(size * 2, buf.getLength());
+ assertTrue(new_capacity != orig_capacity);
+ buf.setSize(size*4);
+ assertTrue(new_capacity != buf.getCapacity());
+ for(int i=0; i < size*2; ++i) {
+ assertEquals(hadoop[i%size], buf.getBytes()[i]);
+ }
+ // shrink the buffer
+ buf.setCapacity(1);
+ // make sure the size has been cut down too
+ assertEquals(1, buf.getLength());
+ // but that the data is still there
+ assertEquals(hadoop[0], buf.getBytes()[0]);
+ }
+
+ public void testHash() throws Exception {
+ byte[] owen = "owen".getBytes();
+ BytesWritable buf = new BytesWritable(owen);
+ assertEquals(4347922, buf.hashCode());
+ buf.setCapacity(10000);
+ assertEquals(4347922, buf.hashCode());
+ buf.setSize(0);
+ assertEquals(1, buf.hashCode());
+ }
+
+ public void testCompare() throws Exception {
+ byte[][] values = new byte[][]{"abc".getBytes(),
+ "ad".getBytes(),
+ "abcd".getBytes(),
+ "".getBytes(),
+ "b".getBytes()};
+ BytesWritable[] buf = new BytesWritable[values.length];
+ for(int i=0; i < values.length; ++i) {
+ buf[i] = new BytesWritable(values[i]);
+ }
+ // check to make sure the compare function is symmetric and reflexive
+ for(int i=0; i < values.length; ++i) {
+ for(int j=0; j < values.length; ++j) {
+ assertTrue(buf[i].compareTo(buf[j]) == -buf[j].compareTo(buf[i]));
+ assertTrue((i == j) == (buf[i].compareTo(buf[j]) == 0));
+ }
+ }
+ assertTrue(buf[0].compareTo(buf[1]) < 0);
+ assertTrue(buf[1].compareTo(buf[2]) > 0);
+ assertTrue(buf[2].compareTo(buf[3]) > 0);
+ assertTrue(buf[3].compareTo(buf[4]) < 0);
+ }
+
+ private void checkToString(byte[] input, String expected) {
+ String actual = new BytesWritable(input).toString();
+ assertEquals(expected, actual);
+ }
+
+ public void testToString() {
+ checkToString(new byte[]{0,1,2,0x10}, "00 01 02 10");
+ checkToString(new byte[]{-0x80, -0x7f, -0x1, -0x2, 1, 0},
+ "80 81 ff fe 01 00");
+ }
+}
+
diff --git a/src/test/core/org/apache/hadoop/io/TestDefaultStringifier.java b/src/test/core/org/apache/hadoop/io/TestDefaultStringifier.java
new file mode 100644
index 0000000000..c96cc73293
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/io/TestDefaultStringifier.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io;
+
+import java.io.IOException;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+
+public class TestDefaultStringifier extends TestCase {
+
+ private static Configuration conf = new Configuration();
+ private static final Log LOG = LogFactory.getLog(TestDefaultStringifier.class);
+
+ private char[] alphabet = "abcdefghijklmnopqrstuvwxyz".toCharArray();
+
+ public void testWithWritable() throws Exception {
+
+ conf.set("io.serializations", "org.apache.hadoop.io.serializer.WritableSerialization");
+
+ LOG.info("Testing DefaultStringifier with Text");
+
+ Random random = new Random();
+
+ //test with a Text
+ for(int i=0;i<10;i++) {
+ //generate a random string
+ StringBuilder builder = new StringBuilder();
+ int strLen = random.nextInt(40);
+ for(int j=0; j< strLen; j++) {
+ builder.append(alphabet[random.nextInt(alphabet.length)]);
+ }
+ Text text = new Text(builder.toString());
+ DefaultStringifier<Text> stringifier = new DefaultStringifier<Text>(conf, Text.class);
+
+ String str = stringifier.toString(text);
+ Text claimedText = stringifier.fromString(str);
+ LOG.info("Object: " + text);
+ LOG.info("String representation of the object: " + str);
+ assertEquals(text, claimedText);
+ }
+ }
+
+ public void testWithJavaSerialization() throws Exception {
+ conf.set("io.serializations", "org.apache.hadoop.io.serializer.JavaSerialization");
+
+ LOG.info("Testing DefaultStringifier with Serializable Integer");
+
+ //Integer implements Serializable
+ Integer testInt = Integer.valueOf(42);
+ DefaultStringifier<Integer> stringifier = new DefaultStringifier<Integer>(conf, Integer.class);
+
+ String str = stringifier.toString(testInt);
+ Integer claimedInt = stringifier.fromString(str);
+ LOG.info("String representation of the object: " + str);
+
+ assertEquals(testInt, claimedInt);
+ }
+
+ public void testStoreLoad() throws IOException {
+
+ LOG.info("Testing DefaultStringifier#store() and #load()");
+ conf.set("io.serializations", "org.apache.hadoop.io.serializer.WritableSerialization");
+ Text text = new Text("uninteresting test string");
+ String keyName = "test.defaultstringifier.key1";
+
+ DefaultStringifier.store(conf,text, keyName);
+
+ Text claimedText = DefaultStringifier.load(conf, keyName, Text.class);
+ assertEquals("DefaultStringifier#load() or #store() might be flawed"
+ , text, claimedText);
+
+ }
+
+ public void testStoreLoadArray() throws IOException {
+ LOG.info("Testing DefaultStringifier#storeArray() and #loadArray()");
+ conf.set("io.serializations", "org.apache.hadoop.io.serializer.JavaSerialization");
+
+ String keyName = "test.defaultstringifier.key2";
+
+ Integer[] array = new Integer[] {1,2,3,4,5};
+
+
+ DefaultStringifier.storeArray(conf, array, keyName);
+
+ Integer[] claimedArray = DefaultStringifier.loadArray(conf, keyName, Integer.class);
+ for (int i = 0; i < array.length; i++) {
+ assertEquals("two arrays are not equal", array[i], claimedArray[i]);
+ }
+
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/io/TestEnumSetWritable.java b/src/test/core/org/apache/hadoop/io/TestEnumSetWritable.java
new file mode 100644
index 0000000000..a512bb1bc2
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/io/TestEnumSetWritable.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io;
+
+import java.io.IOException;
+import java.util.EnumSet;
+
+import junit.framework.TestCase;
+
+/** Unit test for EnumSetWritable */
+public class TestEnumSetWritable extends TestCase {
+
+ enum TestEnumSet {
+ CREATE, OVERWRITE, APPEND;
+ }
+
+ EnumSet<TestEnumSet> nonEmptyFlag = EnumSet.of(TestEnumSet.APPEND);
+ EnumSetWritable<TestEnumSet> nonEmptyFlagWritable = new EnumSetWritable<TestEnumSet>(
+ nonEmptyFlag);
+
+ @SuppressWarnings("unchecked")
+ public void testSerializeAndDeserializeNonEmpty() throws IOException {
+ DataOutputBuffer out = new DataOutputBuffer();
+ ObjectWritable.writeObject(out, nonEmptyFlagWritable, nonEmptyFlagWritable
+ .getClass(), null);
+ DataInputBuffer in = new DataInputBuffer();
+ in.reset(out.getData(), out.getLength());
+ EnumSet<TestEnumSet> read = ((EnumSetWritable<TestEnumSet>) ObjectWritable
+ .readObject(in, null)).get();
+ assertEquals(read, nonEmptyFlag);
+ }
+
+ EnumSet<TestEnumSet> emptyFlag = EnumSet.noneOf(TestEnumSet.class);
+
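+ // An empty EnumSet carries no element from which the element class could be
+ // inferred, so constructing the writable without an explicit element type is
+ // expected to fail.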
+ @SuppressWarnings("unchecked")
+ public void testSerializeAndDeserializeEmpty() throws IOException {
+
+ boolean gotException = false;
+ try {
+ new EnumSetWritable<TestEnumSet>(emptyFlag);
+ } catch (RuntimeException e) {
+ gotException = true;
+ }
+
+ assertTrue(
+ "Instantiate empty EnumSetWritable with no element type class providesd should throw exception.",
+ gotException);
+
+ EnumSetWritable<TestEnumSet> emptyFlagWritable = new EnumSetWritable<TestEnumSet>(
+ emptyFlag, TestEnumSet.class);
+ DataOutputBuffer out = new DataOutputBuffer();
+ ObjectWritable.writeObject(out, emptyFlagWritable, emptyFlagWritable
+ .getClass(), null);
+ DataInputBuffer in = new DataInputBuffer();
+ in.reset(out.getData(), out.getLength());
+ EnumSet<TestEnumSet> read = ((EnumSetWritable<TestEnumSet>) ObjectWritable
+ .readObject(in, null)).get();
+ assertEquals(read, emptyFlag);
+ }
+
+ @SuppressWarnings("unchecked")
+ public void testSerializeAndDeserializeNull() throws IOException {
+
+ boolean gotException = false;
+ try {
+ new EnumSetWritable<TestEnumSet>(null);
+ } catch (RuntimeException e) {
+ gotException = true;
+ }
+
+ assertTrue(
+ "Instantiate empty EnumSetWritable with no element type class providesd should throw exception.",
+ gotException);
+
+ EnumSetWritable<TestEnumSet> nullFlagWritable = new EnumSetWritable<TestEnumSet>(
+ null, TestEnumSet.class);
+
+ DataOutputBuffer out = new DataOutputBuffer();
+ ObjectWritable.writeObject(out, nullFlagWritable, nullFlagWritable
+ .getClass(), null);
+ DataInputBuffer in = new DataInputBuffer();
+ in.reset(out.getData(), out.getLength());
+ EnumSet<TestEnumSet> read = ((EnumSetWritable<TestEnumSet>) ObjectWritable
+ .readObject(in, null)).get();
+ assertEquals(read, null);
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/io/TestGenericWritable.java b/src/test/core/org/apache/hadoop/io/TestGenericWritable.java
new file mode 100644
index 0000000000..486d93d438
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/io/TestGenericWritable.java
@@ -0,0 +1,178 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * TestCase for {@link GenericWritable} class.
+ * @see TestWritable#testWritable(Writable)
+ */
+public class TestGenericWritable extends TestCase {
+
+ private Configuration conf;
+ public static final String CONF_TEST_KEY = "test.generic.writable";
+ public static final String CONF_TEST_VALUE = "dummy";
+
+ @Override
+ protected void setUp() throws Exception {
+ super.setUp();
+ conf = new Configuration();
+ //set the configuration parameter
+ conf.set(CONF_TEST_KEY, CONF_TEST_VALUE);
+ }
+
+ /** Dummy class for testing {@link GenericWritable} */
+ public static class Foo implements Writable {
+ private String foo = "foo";
+ public void readFields(DataInput in) throws IOException {
+ foo = Text.readString(in);
+ }
+ public void write(DataOutput out) throws IOException {
+ Text.writeString(out, foo);
+ }
+ @Override
+ public boolean equals(Object obj) {
+ if (!(obj instanceof Foo))
+ return false;
+ return this.foo.equals(((Foo)obj).foo);
+ }
+ }
+ /** Dummy class for testing {@link GenericWritable} */
+ public static class Bar implements Writable, Configurable {
+ private int bar = 42; //The Answer to The Ultimate Question Of Life, the Universe and Everything
+ private Configuration conf = null;
+ public void readFields(DataInput in) throws IOException {
+ bar = in.readInt();
+ }
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(bar);
+ }
+ public Configuration getConf() {
+ return conf;
+ }
+ public void setConf(Configuration conf) {
+ this.conf = conf;
+ }
+ @Override
+ public boolean equals(Object obj) {
+ if (!(obj instanceof Bar))
+ return false;
+ return this.bar == ((Bar)obj).bar;
+ }
+ }
+
+ /** Dummy class for testing {@link GenericWritable} */
+ public static class Baz extends Bar {
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ super.readFields(in);
+ //needs a configuration parameter
+ assertEquals("Configuration is not set for the wrapped object",
+ CONF_TEST_VALUE, getConf().get(CONF_TEST_KEY));
+ }
+ @Override
+ public void write(DataOutput out) throws IOException {
+ super.write(out);
+ }
+ }
+
+ /** Dummy class for testing {@link GenericWritable} */
+ public static class FooGenericWritable extends GenericWritable {
+ @Override
+ @SuppressWarnings("unchecked")
+ protected Class<? extends Writable>[] getTypes() {
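+ // GenericWritable serializes the index of the wrapped class in this array,
+ // so the order here determines the on-disk type codes.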
+ return new Class[] {Foo.class, Bar.class, Baz.class};
+ }
+ @Override
+ public boolean equals(Object obj) {
+ if(! (obj instanceof FooGenericWritable))
+ return false;
+ return get().equals(((FooGenericWritable)obj).get());
+ }
+ }
+
+ public void testFooWritable() throws Exception {
+ System.out.println("Testing Writable wrapped in GenericWritable");
+ FooGenericWritable generic = new FooGenericWritable();
+ generic.setConf(conf);
+ Foo foo = new Foo();
+ generic.set(foo);
+ TestWritable.testWritable(generic);
+ }
+
+ public void testBarWritable() throws Exception {
+ System.out.println("Testing Writable, Configurable wrapped in GenericWritable");
+ FooGenericWritable generic = new FooGenericWritable();
+ generic.setConf(conf);
+ Bar bar = new Bar();
+ bar.setConf(conf);
+ generic.set(bar);
+
+ //test writing generic writable
+ FooGenericWritable after
+ = (FooGenericWritable)TestWritable.testWritable(generic, conf);
+
+ //test configuration
+ System.out.println("Testing if Configuration is passed to wrapped classes");
+ assertTrue(after.get() instanceof Configurable);
+ assertNotNull(((Configurable)after.get()).getConf());
+ }
+
+ public void testBazWritable() throws Exception {
+ System.out.println("Testing for GenericWritable to find class names");
+ FooGenericWritable generic = new FooGenericWritable();
+ generic.setConf(conf);
+ Baz baz = new Baz();
+ generic.set(baz);
+ TestWritable.testWritable(generic, conf);
+ }
+
+ public void testSet() throws Exception {
+ Foo foo = new Foo();
+ FooGenericWritable generic = new FooGenericWritable();
+ //exception should not occur
+ generic.set(foo);
+
+ try {
+ //exception should occur, since IntWritable is not registered
+ generic = new FooGenericWritable();
+ generic.set(new IntWritable(1));
+ fail("Generic writable should have thrown an exception for a Writable not registered");
+ } catch (RuntimeException e) {
+ //ignore
+ }
+
+ }
+
+ public void testGet() throws Exception {
+ Foo foo = new Foo();
+ FooGenericWritable generic = new FooGenericWritable();
+ generic.set(foo);
+ assertEquals(foo, generic.get());
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/io/TestMD5Hash.java b/src/test/core/org/apache/hadoop/io/TestMD5Hash.java
new file mode 100644
index 0000000000..feb1107ed4
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/io/TestMD5Hash.java
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io;
+
+import org.apache.hadoop.io.TestWritable;
+import junit.framework.TestCase;
+import java.security.MessageDigest;
+import java.util.Random;
+
+/** Unit tests for MD5Hash. */
+public class TestMD5Hash extends TestCase {
+ public TestMD5Hash(String name) { super(name); }
+
+ private static final Random RANDOM = new Random();
+
+ public static MD5Hash getTestHash() throws Exception {
+ MessageDigest digest = MessageDigest.getInstance("MD5");
+ byte[] buffer = new byte[1024];
+ RANDOM.nextBytes(buffer);
+ digest.update(buffer);
+ return new MD5Hash(digest.digest());
+ }
+
+ protected static byte[] D00 = new byte[] {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ protected static byte[] DFF = new byte[] {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
+
+ public void testMD5Hash() throws Exception {
+ MD5Hash md5Hash = getTestHash();
+
+ final MD5Hash md5Hash00
+ = new MD5Hash(D00);
+
+ final MD5Hash md5HashFF
+ = new MD5Hash(DFF);
+
+ MD5Hash orderedHash = new MD5Hash(new byte[]{1,2,3,4,5,6,7,8,9,10,11,12,
+ 13,14,15,16});
+ MD5Hash backwardHash = new MD5Hash(new byte[]{-1,-2,-3,-4,-5,-6,-7,-8,
+ -9,-10,-11,-12, -13, -14,
+ -15,-16});
+ MD5Hash closeHash1 = new MD5Hash(new byte[]{-1,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0});
+ MD5Hash closeHash2 = new MD5Hash(new byte[]{-1,1,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0});
+
+ // test i/o
+ TestWritable.testWritable(md5Hash);
+ TestWritable.testWritable(md5Hash00);
+ TestWritable.testWritable(md5HashFF);
+
+ // test equals()
+ assertEquals(md5Hash, md5Hash);
+ assertEquals(md5Hash00, md5Hash00);
+ assertEquals(md5HashFF, md5HashFF);
+
+ // test compareTo()
+ assertTrue(md5Hash.compareTo(md5Hash) == 0);
+ assertTrue(md5Hash00.compareTo(md5Hash) < 0);
+ assertTrue(md5HashFF.compareTo(md5Hash) > 0);
+
+ // test toString and string ctor
+ assertEquals(md5Hash, new MD5Hash(md5Hash.toString()));
+ assertEquals(md5Hash00, new MD5Hash(md5Hash00.toString()));
+ assertEquals(md5HashFF, new MD5Hash(md5HashFF.toString()));
+
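+ // quarterDigest() and halfDigest() return the first 4 and 8 bytes of the
+ // digest as big-endian int and long values, respectively.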
+ assertEquals(0x01020304, orderedHash.quarterDigest());
+ assertEquals(0xfffefdfc, backwardHash.quarterDigest());
+
+ assertEquals(0x0102030405060708L, orderedHash.halfDigest());
+ assertEquals(0xfffefdfcfbfaf9f8L, backwardHash.halfDigest());
+ assertTrue("hash collision",
+ closeHash1.hashCode() != closeHash2.hashCode());
+
+ Thread t1 = new Thread() {
+ public void run() {
+ for (int i = 0; i < 100; i++) {
+ MD5Hash hash = new MD5Hash(DFF);
+ assertEquals(hash, md5HashFF);
+ }
+ }
+ };
+
+ Thread t2 = new Thread() {
+ public void run() {
+ for (int i = 0; i < 100; i++) {
+ MD5Hash hash = new MD5Hash(D00);
+ assertEquals(hash, md5Hash00);
+ }
+ }
+ };
+
+ t1.start();
+ t2.start();
+ t1.join();
+ t2.join();
+
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/io/TestMapFile.java b/src/test/core/org/apache/hadoop/io/TestMapFile.java
new file mode 100644
index 0000000000..f006d4f401
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/io/TestMapFile.java
@@ -0,0 +1,124 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+import junit.framework.TestCase;
+
+public class TestMapFile extends TestCase {
+ private static Configuration conf = new Configuration();
+
+ /**
+ * Test getClosest feature.
+ * @throws Exception
+ */
+ public void testGetClosest() throws Exception {
+ // Write a mapfile of simple data: keys are two-digit numbers from 10 to 90.
+ Path dirName = new Path(System.getProperty("test.build.data",".") +
+ getName() + ".mapfile");
+ FileSystem fs = FileSystem.getLocal(conf);
+ Path qualifiedDirName = fs.makeQualified(dirName);
+ // Make an index entry for every third insertion.
+ MapFile.Writer.setIndexInterval(conf, 3);
+ MapFile.Writer writer = new MapFile.Writer(conf, fs,
+ qualifiedDirName.toString(), Text.class, Text.class);
+ // Assert that the index interval we set above took effect.
+ assertEquals(3, writer.getIndexInterval());
+ // Add entries up to 100 in intervals of ten.
+ final int FIRST_KEY = 10;
+ for (int i = FIRST_KEY; i < 100; i += 10) {
+ String iStr = Integer.toString(i);
+ Text t = new Text("00".substring(iStr.length()) + iStr);
+ writer.append(t, t);
+ }
+ writer.close();
+ // Now do getClosest on created mapfile.
+ MapFile.Reader reader = new MapFile.Reader(fs, qualifiedDirName.toString(),
+ conf);
+ Text key = new Text("55");
+ Text value = new Text();
+ Text closest = (Text)reader.getClosest(key, value);
+ // Assert that closest after 55 is 60
+ assertEquals(new Text("60"), closest);
+ // Get closest that falls before the passed key: 50
+ closest = (Text)reader.getClosest(key, value, true);
+ assertEquals(new Text("50"), closest);
+ // Test get closest when we pass explicit key
+ final Text TWENTY = new Text("20");
+ closest = (Text)reader.getClosest(TWENTY, value);
+ assertEquals(TWENTY, closest);
+ closest = (Text)reader.getClosest(TWENTY, value, true);
+ assertEquals(TWENTY, closest);
+ // Test what happens at boundaries. Assert if searching a key that is
+ // less than first key in the mapfile, that the first key is returned.
+ key = new Text("00");
+ closest = (Text)reader.getClosest(key, value);
+ assertEquals(FIRST_KEY, Integer.parseInt(closest.toString()));
+
+ // If we're looking for the first key before, and we pass in a key before
+ // the first key in the file, we should get null
+ closest = (Text)reader.getClosest(key, value, true);
+ assertNull(closest);
+
+ // Assert that null is returned if key is > last entry in mapfile.
+ key = new Text("99");
+ closest = (Text)reader.getClosest(key, value);
+ assertNull(closest);
+
+ // If we were looking for the key before, we should get the last key
+ closest = (Text)reader.getClosest(key, value, true);
+ assertEquals(new Text("90"), closest);
+ }
+
+ public void testMidKey() throws Exception {
+ // Write a mapfile containing a single entry.
+ Path dirName = new Path(System.getProperty("test.build.data",".") +
+ getName() + ".mapfile");
+ FileSystem fs = FileSystem.getLocal(conf);
+ Path qualifiedDirName = fs.makeQualified(dirName);
+
+ MapFile.Writer writer = new MapFile.Writer(conf, fs,
+ qualifiedDirName.toString(), IntWritable.class, IntWritable.class);
+ writer.append(new IntWritable(1), new IntWritable(1));
+ writer.close();
+ // Now read the midKey of the created mapfile.
+ MapFile.Reader reader = new MapFile.Reader(fs, qualifiedDirName.toString(),
+ conf);
+ assertEquals(new IntWritable(1), reader.midKey());
+ }
+
+
+ public void testMidKeyEmpty() throws Exception {
+ // Write an empty mapfile (no entries).
+ Path dirName = new Path(System.getProperty("test.build.data",".") +
+ getName() + ".mapfile");
+ FileSystem fs = FileSystem.getLocal(conf);
+ Path qualifiedDirName = fs.makeQualified(dirName);
+
+ MapFile.Writer writer = new MapFile.Writer(conf, fs,
+ qualifiedDirName.toString(), IntWritable.class, IntWritable.class);
+ writer.close();
+ // midKey of an empty mapfile should be null.
+ MapFile.Reader reader = new MapFile.Reader(fs, qualifiedDirName.toString(),
+ conf);
+ assertEquals(null, reader.midKey());
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/io/TestMapWritable.java b/src/test/core/org/apache/hadoop/io/TestMapWritable.java
new file mode 100644
index 0000000000..3d8c4ab3c2
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/io/TestMapWritable.java
@@ -0,0 +1,132 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.util.Map;
+
+import junit.framework.TestCase;
+
+/**
+ * Tests MapWritable
+ */
+public class TestMapWritable extends TestCase {
+ /** the test */
+ @SuppressWarnings("unchecked")
+ public void testMapWritable() {
+ Text[] keys = {
+ new Text("key1"),
+ new Text("key2"),
+ new Text("Key3"),
+ };
+
+ BytesWritable[] values = {
+ new BytesWritable("value1".getBytes()),
+ new BytesWritable("value2".getBytes()),
+ new BytesWritable("value3".getBytes())
+ };
+
+ MapWritable inMap = new MapWritable();
+ for (int i = 0; i < keys.length; i++) {
+ inMap.put(keys[i], values[i]);
+ }
+
+ MapWritable outMap = new MapWritable(inMap);
+ assertEquals(inMap.size(), outMap.size());
+
+ for (Map.Entry<Writable, Writable> e: inMap.entrySet()) {
+ assertTrue(outMap.containsKey(e.getKey()));
+ assertEquals(0, ((WritableComparable) outMap.get(e.getKey())).compareTo(
+ e.getValue()));
+ }
+
+ // Now for something a little harder...
+
+ Text[] maps = {
+ new Text("map1"),
+ new Text("map2")
+ };
+
+ MapWritable mapOfMaps = new MapWritable();
+ mapOfMaps.put(maps[0], inMap);
+ mapOfMaps.put(maps[1], outMap);
+
+ MapWritable copyOfMapOfMaps = new MapWritable(mapOfMaps);
+ for (int i = 0; i < maps.length; i++) {
+ assertTrue(copyOfMapOfMaps.containsKey(maps[i]));
+ MapWritable a = (MapWritable) mapOfMaps.get(maps[i]);
+ MapWritable b = (MapWritable) copyOfMapOfMaps.get(maps[i]);
+ assertEquals(a.size(), b.size());
+ for (Writable key: a.keySet()) {
+ assertTrue(b.containsKey(key));
+
+ // This will work because we know what we put into each set
+
+ WritableComparable aValue = (WritableComparable) a.get(key);
+ WritableComparable bValue = (WritableComparable) b.get(key);
+ assertEquals(0, aValue.compareTo(bValue));
+ }
+ }
+ }
+
+ /**
+ * Test that number of "unknown" classes is propagated across multiple copies.
+ */
+ @SuppressWarnings("deprecation")
+ public void testForeignClass() {
+ MapWritable inMap = new MapWritable();
+ inMap.put(new Text("key"), new UTF8("value"));
+ inMap.put(new Text("key2"), new UTF8("value2"));
+ MapWritable outMap = new MapWritable(inMap);
+ MapWritable copyOfCopy = new MapWritable(outMap);
+ assertEquals(1, copyOfCopy.getNewClasses());
+ }
+
+ /**
+ * Assert MapWritable does not grow across calls to readFields.
+ * @throws Exception
+ * @see HADOOP-2244
+ */
+ public void testMultipleCallsToReadFieldsAreSafe() throws Exception {
+ // Create an instance and add a key/value.
+ MapWritable m = new MapWritable();
+ final Text t = new Text(getName());
+ m.put(t, t);
+ // Get current size of map. Key values are 't'.
+ int count = m.size();
+ // Now serialize... save off the bytes.
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ DataOutputStream dos = new DataOutputStream(baos);
+ m.write(dos);
+ dos.close();
+ // Now add new values to the MapWritable.
+ m.put(new Text("key1"), new Text("value1"));
+ m.put(new Text("key2"), new Text("value2"));
+ // Now deserialize the original MapWritable. Ensure count and key values
+ // match original state.
+ ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
+ DataInputStream dis = new DataInputStream(bais);
+ m.readFields(dis);
+ assertEquals(count, m.size());
+ assertTrue(m.get(t).equals(t));
+ dis.close();
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/io/TestSequenceFileSerialization.java b/src/test/core/org/apache/hadoop/io/TestSequenceFileSerialization.java
new file mode 100644
index 0000000000..c9fc1eae4f
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/io/TestSequenceFileSerialization.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.SequenceFile.Reader;
+import org.apache.hadoop.io.SequenceFile.Writer;
+
+public class TestSequenceFileSerialization extends TestCase {
+
+ private Configuration conf;
+ private FileSystem fs;
+
+ @Override
+ protected void setUp() throws Exception {
+ conf = new Configuration();
+ conf.set("io.serializations",
+ "org.apache.hadoop.io.serializer.JavaSerialization");
+ fs = FileSystem.getLocal(conf);
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ fs.close();
+ }
+
+ public void testJavaSerialization() throws Exception {
+ Path file = new Path(System.getProperty("test.build.data",".") +
+ "/test.seq");
+
+ fs.delete(file, true);
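+ // With JavaSerialization registered above, plain Long and String (rather than
+ // Writables) can be used as the key and value types of the SequenceFile.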
+ Writer writer = SequenceFile.createWriter(fs, conf, file, Long.class,
+ String.class);
+
+ writer.append(1L, "one");
+ writer.append(2L, "two");
+
+ writer.close();
+
+ Reader reader = new Reader(fs, file, conf);
+ assertEquals(1L, reader.next((Object) null));
+ assertEquals("one", reader.getCurrentValue((Object) null));
+ assertEquals(2L, reader.next((Object) null));
+ assertEquals("two", reader.getCurrentValue((Object) null));
+ assertNull(reader.next((Object) null));
+ reader.close();
+
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/io/TestSetFile.java b/src/test/core/org/apache/hadoop/io/TestSetFile.java
new file mode 100644
index 0000000000..70d02e013f
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/io/TestSetFile.java
@@ -0,0 +1,157 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io;
+
+import java.io.*;
+import java.util.*;
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.*;
+
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.conf.*;
+import org.apache.hadoop.io.SequenceFile.CompressionType;
+
+/** Unit tests for SetFile. */
+public class TestSetFile extends TestCase {
+ private static final Log LOG = LogFactory.getLog(TestSetFile.class);
+ private static String FILE =
+ System.getProperty("test.build.data",".") + "/test.set";
+
+ private static Configuration conf = new Configuration();
+
+ public TestSetFile(String name) { super(name); }
+
+ public void testSetFile() throws Exception {
+ FileSystem fs = FileSystem.getLocal(conf);
+ try {
+ RandomDatum[] data = generate(10000);
+ writeTest(fs, data, FILE, CompressionType.NONE);
+ readTest(fs, data, FILE);
+
+ writeTest(fs, data, FILE, CompressionType.BLOCK);
+ readTest(fs, data, FILE);
+ } finally {
+ fs.close();
+ }
+ }
+
+ private static RandomDatum[] generate(int count) {
+ LOG.info("generating " + count + " records in memory");
+ RandomDatum[] data = new RandomDatum[count];
+ RandomDatum.Generator generator = new RandomDatum.Generator();
+ for (int i = 0; i < count; i++) {
+ generator.next();
+ data[i] = generator.getValue();
+ }
+ LOG.info("sorting " + count + " records");
+ Arrays.sort(data);
+ return data;
+ }
+
+ private static void writeTest(FileSystem fs, RandomDatum[] data,
+ String file, CompressionType compress)
+ throws IOException {
+ MapFile.delete(fs, file);
+ LOG.info("creating with " + data.length + " records");
+ SetFile.Writer writer =
+ new SetFile.Writer(conf, fs, file,
+ WritableComparator.get(RandomDatum.class),
+ compress);
+ for (int i = 0; i < data.length; i++)
+ writer.append(data[i]);
+ writer.close();
+ }
+
+ private static void readTest(FileSystem fs, RandomDatum[] data, String file)
+ throws IOException {
+ RandomDatum v = new RandomDatum();
+ int sample = (int)Math.sqrt(data.length);
+ Random random = new Random();
+ LOG.info("reading " + sample + " records");
+ SetFile.Reader reader = new SetFile.Reader(fs, file, conf);
+ for (int i = 0; i < sample; i++) {
+ if (!reader.seek(data[random.nextInt(data.length)]))
+ throw new RuntimeException("wrong value at " + i);
+ }
+ reader.close();
+ LOG.info("done reading " + data.length);
+ }
+
+
+ /** For debugging and testing. */
+ public static void main(String[] args) throws Exception {
+ int count = 1024 * 1024;
+ boolean create = true;
+ boolean check = true;
+ String file = FILE;
+ String compress = "NONE";
+
+ String usage = "Usage: TestSetFile [-count N] [-nocreate] [-nocheck] [-compress type] file";
+
+ if (args.length == 0) {
+ System.err.println(usage);
+ System.exit(-1);
+ }
+
+ int i = 0;
+ Path fpath=null;
+ FileSystem fs = null;
+ try {
+ for (; i < args.length; i++) { // parse command line
+ if (args[i] == null) {
+ continue;
+ } else if (args[i].equals("-count")) {
+ count = Integer.parseInt(args[++i]);
+ } else if (args[i].equals("-nocreate")) {
+ create = false;
+ } else if (args[i].equals("-nocheck")) {
+ check = false;
+ } else if (args[i].equals("-compress")) {
+ compress = args[++i];
+ } else {
+ // file is required parameter
+ file = args[i];
+ fpath=new Path(file);
+ }
+ }
+
+ fs = fpath.getFileSystem(conf);
+
+ LOG.info("count = " + count);
+ LOG.info("create = " + create);
+ LOG.info("check = " + check);
+ LOG.info("compress = " + compress);
+ LOG.info("file = " + file);
+
+ RandomDatum[] data = generate(count);
+
+ if (create) {
+ writeTest(fs, data, file, CompressionType.valueOf(compress));
+ }
+
+ if (check) {
+ readTest(fs, data, file);
+ }
+
+ } finally {
+ fs.close();
+ }
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/io/TestSortedMapWritable.java b/src/test/core/org/apache/hadoop/io/TestSortedMapWritable.java
new file mode 100644
index 0000000000..927bfc1f42
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/io/TestSortedMapWritable.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io;
+
+import java.util.Map;
+
+import junit.framework.TestCase;
+
+/**
+ * Tests SortedMapWritable
+ */
+public class TestSortedMapWritable extends TestCase {
+ /** the test */
+ @SuppressWarnings("unchecked")
+ public void testSortedMapWritable() {
+ Text[] keys = {
+ new Text("key1"),
+ new Text("key2"),
+ new Text("key3"),
+ };
+
+ BytesWritable[] values = {
+ new BytesWritable("value1".getBytes()),
+ new BytesWritable("value2".getBytes()),
+ new BytesWritable("value3".getBytes())
+ };
+
+ SortedMapWritable inMap = new SortedMapWritable();
+ for (int i = 0; i < keys.length; i++) {
+ inMap.put(keys[i], values[i]);
+ }
+
+ assertEquals(0, inMap.firstKey().compareTo(keys[0]));
+ assertEquals(0, inMap.lastKey().compareTo(keys[2]));
+
+ SortedMapWritable outMap = new SortedMapWritable(inMap);
+ assertEquals(inMap.size(), outMap.size());
+
+ for (Map.Entry<WritableComparable, Writable> e: inMap.entrySet()) {
+ assertTrue(outMap.containsKey(e.getKey()));
+ assertEquals(0, ((WritableComparable) outMap.get(e.getKey())).compareTo(
+ e.getValue()));
+ }
+
+ // Now for something a little harder...
+
+ Text[] maps = {
+ new Text("map1"),
+ new Text("map2")
+ };
+
+ SortedMapWritable mapOfMaps = new SortedMapWritable();
+ mapOfMaps.put(maps[0], inMap);
+ mapOfMaps.put(maps[1], outMap);
+
+ SortedMapWritable copyOfMapOfMaps = new SortedMapWritable(mapOfMaps);
+ for (int i = 0; i < maps.length; i++) {
+ assertTrue(copyOfMapOfMaps.containsKey(maps[i]));
+
+ SortedMapWritable a = (SortedMapWritable) mapOfMaps.get(maps[i]);
+ SortedMapWritable b = (SortedMapWritable) copyOfMapOfMaps.get(maps[i]);
+ assertEquals(a.size(), b.size());
+ for (Writable key: a.keySet()) {
+ assertTrue(b.containsKey(key));
+
+ // This will work because we know what we put into each set
+
+ WritableComparable aValue = (WritableComparable) a.get(key);
+ WritableComparable bValue = (WritableComparable) b.get(key);
+ assertEquals(0, aValue.compareTo(bValue));
+ }
+ }
+ }
+
+ /**
+ * Test that number of "unknown" classes is propagated across multiple copies.
+ */
+ @SuppressWarnings("deprecation")
+ public void testForeignClass() {
+ SortedMapWritable inMap = new SortedMapWritable();
+ inMap.put(new Text("key"), new UTF8("value"));
+ inMap.put(new Text("key2"), new UTF8("value2"));
+ SortedMapWritable outMap = new SortedMapWritable(inMap);
+ SortedMapWritable copyOfCopy = new SortedMapWritable(outMap);
+ assertEquals(1, copyOfCopy.getNewClasses());
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/io/TestText.java b/src/test/core/org/apache/hadoop/io/TestText.java
new file mode 100644
index 0000000000..6e00486099
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/io/TestText.java
@@ -0,0 +1,266 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io;
+
+import junit.framework.TestCase;
+
+import java.nio.ByteBuffer;
+import java.nio.charset.CharacterCodingException;
+import java.util.Random;
+
+/** Unit tests for LargeUTF8. */
+public class TestText extends TestCase {
+ private static final int NUM_ITERATIONS = 100;
+ public TestText(String name) { super(name); }
+
+ private static final Random RANDOM = new Random(1);
+
+ private static final int RAND_LEN = -1;
+
+ // generate a valid java String
+ private static String getTestString(int len) throws Exception {
+ StringBuffer buffer = new StringBuffer();
+ int length = (len==RAND_LEN) ? RANDOM.nextInt(1000) : len;
+ while (buffer.length() test = WritableName.getClass("long",conf);
+ assertTrue(test != null);
+ }
+
+ public void testSetName() throws Exception {
+ Configuration conf = new Configuration();
+ WritableName.setName(SimpleWritable.class, testName);
+
+ Class<?> test = WritableName.getClass(testName,conf);
+ assertTrue(test.equals(SimpleWritable.class));
+ }
+
+
+ public void testAddName() throws Exception {
+ Configuration conf = new Configuration();
+ String altName = testName + ".alt";
+
+ WritableName.addName(SimpleWritable.class, altName);
+
+ Class<?> test = WritableName.getClass(altName, conf);
+ assertTrue(test.equals(SimpleWritable.class));
+
+ // check original name still works
+ test = WritableName.getClass(testName, conf);
+ assertTrue(test.equals(SimpleWritable.class));
+
+ }
+
+ public void testBadName() throws Exception {
+ Configuration conf = new Configuration();
+ try {
+ Class<?> test = WritableName.getClass("unknown_junk",conf);
+ assertTrue(false);
+ } catch(IOException e) {
+ assertTrue(e.getMessage().matches(".*unknown_junk.*"));
+ }
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/io/TestWritableUtils.java b/src/test/core/org/apache/hadoop/io/TestWritableUtils.java
new file mode 100644
index 0000000000..2487fc0612
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/io/TestWritableUtils.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import junit.framework.TestCase;
+
+public class TestWritableUtils extends TestCase {
+ private static final Log LOG = LogFactory.getLog(TestWritableUtils.class);
+
+ public static void testValue(int val, int vintlen) throws IOException {
+ DataOutputBuffer buf = new DataOutputBuffer();
+ DataInputBuffer inbuf = new DataInputBuffer();
+ WritableUtils.writeVInt(buf, val);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Value = " + val);
+ BytesWritable printer = new BytesWritable();
+ printer.set(buf.getData(), 0, buf.getLength());
+ LOG.debug("Buffer = " + printer);
+ }
+ inbuf.reset(buf.getData(), 0, buf.getLength());
+ assertEquals(val, WritableUtils.readVInt(inbuf));
+ assertEquals(vintlen, buf.getLength());
+ assertEquals(vintlen, WritableUtils.getVIntSize(val));
+ assertEquals(vintlen, WritableUtils.decodeVIntSize(buf.getData()[0]));
+ }
+
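+ // writeVInt stores values from -112 to 127 in a single byte; anything outside
+ // that range takes a length-prefix byte plus one to four payload bytes.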
+ public static void testVInt() throws Exception {
+ testValue(12, 1);
+ testValue(127, 1);
+ testValue(-112, 1);
+ testValue(-113, 2);
+ testValue(-128, 2);
+ testValue(128, 2);
+ testValue(-129, 2);
+ testValue(255, 2);
+ testValue(-256, 2);
+ testValue(256, 3);
+ testValue(-257, 3);
+ testValue(65535, 3);
+ testValue(-65536, 3);
+ testValue(65536, 4);
+ testValue(-65537, 4);
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/io/compress/TestCodec.java b/src/test/core/org/apache/hadoop/io/compress/TestCodec.java
new file mode 100644
index 0000000000..38e4a35837
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/io/compress/TestCodec.java
@@ -0,0 +1,249 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.compress;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.RandomDatum;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.io.SequenceFile.CompressionType;
+import org.apache.hadoop.io.compress.CompressionOutputStream;
+import org.apache.hadoop.io.compress.zlib.ZlibFactory;
+
+public class TestCodec extends TestCase {
+
+ private static final Log LOG=
+ LogFactory.getLog(TestCodec.class);
+
+ private Configuration conf = new Configuration();
+ private int count = 10000;
+ private int seed = new Random().nextInt();
+
+ public void testDefaultCodec() throws IOException {
+ codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.DefaultCodec");
+ codecTest(conf, seed, count, "org.apache.hadoop.io.compress.DefaultCodec");
+ }
+
+ public void testGzipCodec() throws IOException {
+ codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.GzipCodec");
+ codecTest(conf, seed, count, "org.apache.hadoop.io.compress.GzipCodec");
+ }
+
+ public void testBZip2Codec() throws IOException {
+ codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.BZip2Codec");
+ codecTest(conf, seed, count, "org.apache.hadoop.io.compress.BZip2Codec");
+ }
+
+ private static void codecTest(Configuration conf, int seed, int count,
+ String codecClass)
+ throws IOException {
+
+ // Create the codec
+ CompressionCodec codec = null;
+ try {
+ codec = (CompressionCodec)
+ ReflectionUtils.newInstance(conf.getClassByName(codecClass), conf);
+ } catch (ClassNotFoundException cnfe) {
+ throw new IOException("Illegal codec!");
+ }
+ LOG.info("Created a Codec object of type: " + codecClass);
+
+ // Generate data
+ DataOutputBuffer data = new DataOutputBuffer();
+ RandomDatum.Generator generator = new RandomDatum.Generator(seed);
+ for(int i=0; i < count; ++i) {
+ generator.next();
+ RandomDatum key = generator.getKey();
+ RandomDatum value = generator.getValue();
+
+ key.write(data);
+ value.write(data);
+ }
+ DataInputBuffer originalData = new DataInputBuffer();
+ DataInputStream originalIn = new DataInputStream(new BufferedInputStream(originalData));
+ originalData.reset(data.getData(), 0, data.getLength());
+
+ LOG.info("Generated " + count + " records");
+
+ // Compress data
+ DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
+ CompressionOutputStream deflateFilter =
+ codec.createOutputStream(compressedDataBuffer);
+ DataOutputStream deflateOut =
+ new DataOutputStream(new BufferedOutputStream(deflateFilter));
+ deflateOut.write(data.getData(), 0, data.getLength());
+ deflateOut.flush();
+ deflateFilter.finish();
+ LOG.info("Finished compressing data");
+
+ // De-compress data
+ DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
+ deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0,
+ compressedDataBuffer.getLength());
+ CompressionInputStream inflateFilter =
+ codec.createInputStream(deCompressedDataBuffer);
+ DataInputStream inflateIn =
+ new DataInputStream(new BufferedInputStream(inflateFilter));
+
+ // Check
+ for(int i=0; i < count; ++i) {
+ RandomDatum k1 = new RandomDatum();
+ RandomDatum v1 = new RandomDatum();
+ k1.readFields(originalIn);
+ v1.readFields(originalIn);
+
+ RandomDatum k2 = new RandomDatum();
+ RandomDatum v2 = new RandomDatum();
+ k2.readFields(inflateIn);
+ v2.readFields(inflateIn);
+ }
+ LOG.info("SUCCESS! Completed checking " + count + " records");
+ }
+
+ public void testCodecPoolGzipReuse() throws Exception {
+ Configuration conf = new Configuration();
+ conf.setBoolean("hadoop.native.lib", true);
+ if (!ZlibFactory.isNativeZlibLoaded(conf)) {
+ LOG.warn("testCodecPoolGzipReuse skipped: native libs not loaded");
+ return;
+ }
+ GzipCodec gzc = ReflectionUtils.newInstance(GzipCodec.class, conf);
+ DefaultCodec dfc = ReflectionUtils.newInstance(DefaultCodec.class, conf);
+ Compressor c1 = CodecPool.getCompressor(gzc);
+ Compressor c2 = CodecPool.getCompressor(dfc);
+ CodecPool.returnCompressor(c1);
+ CodecPool.returnCompressor(c2);
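+ // The compressor obtained for the gzip codec must not be the one that was
+ // just pooled on behalf of the DefaultCodec.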
+ assertTrue("Got mismatched ZlibCompressor", c2 != CodecPool.getCompressor(gzc));
+ }
+
+ public void testSequenceFileDefaultCodec() throws IOException, ClassNotFoundException,
+ InstantiationException, IllegalAccessException {
+ sequenceFileCodecTest(conf, 100, "org.apache.hadoop.io.compress.DefaultCodec", 100);
+ sequenceFileCodecTest(conf, 200000, "org.apache.hadoop.io.compress.DefaultCodec", 1000000);
+ }
+
+ public void testSequenceFileBZip2Codec() throws IOException, ClassNotFoundException,
+ InstantiationException, IllegalAccessException {
+ sequenceFileCodecTest(conf, 0, "org.apache.hadoop.io.compress.BZip2Codec", 100);
+ sequenceFileCodecTest(conf, 100, "org.apache.hadoop.io.compress.BZip2Codec", 100);
+ sequenceFileCodecTest(conf, 200000, "org.apache.hadoop.io.compress.BZip2Codec", 1000000);
+ }
+
+ private static void sequenceFileCodecTest(Configuration conf, int lines,
+ String codecClass, int blockSize)
+ throws IOException, ClassNotFoundException, InstantiationException, IllegalAccessException {
+
+ Path filePath = new Path("SequenceFileCodecTest." + codecClass);
+ // Configuration
+ conf.setInt("io.seqfile.compress.blocksize", blockSize);
+
+ // Create the SequenceFile
+ FileSystem fs = FileSystem.get(conf);
+ LOG.info("Creating SequenceFile with codec \"" + codecClass + "\"");
+ SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, filePath,
+ Text.class, Text.class, CompressionType.BLOCK,
+ (CompressionCodec)Class.forName(codecClass).newInstance());
+
+ // Write some data
+ LOG.info("Writing to SequenceFile...");
+ for (int i=0; i getCompressorType() {
+ return null;
+ }
+
+ public Compressor createCompressor() {
+ return null;
+ }
+
+ public CompressionInputStream createInputStream(InputStream in,
+ Decompressor decompressor)
+ throws IOException {
+ return null;
+ }
+
+ public CompressionInputStream createInputStream(InputStream in)
+ throws IOException {
+ return null;
+ }
+
+ public CompressionOutputStream createOutputStream(OutputStream out,
+ Compressor compressor)
+ throws IOException {
+ return null;
+ }
+
+ public Class<? extends Decompressor> getDecompressorType() {
+ return null;
+ }
+
+ public Decompressor createDecompressor() {
+ return null;
+ }
+
+ public String getDefaultExtension() {
+ return ".base";
+ }
+ }
+
+ private static class BarCodec extends BaseCodec {
+ public String getDefaultExtension() {
+ return "bar";
+ }
+ }
+
+ private static class FooBarCodec extends BaseCodec {
+ public String getDefaultExtension() {
+ return ".foo.bar";
+ }
+ }
+
+ private static class FooCodec extends BaseCodec {
+ public String getDefaultExtension() {
+ return ".foo";
+ }
+ }
+
+ /**
+ * Returns a factory for a given set of codecs
+ * @param classes the codec classes to include
+ * @return a new factory
+ */
+ private static CompressionCodecFactory setClasses(Class[] classes) {
+ Configuration conf = new Configuration();
+ CompressionCodecFactory.setCodecClasses(conf, Arrays.asList(classes));
+ return new CompressionCodecFactory(conf);
+ }
+
+ private static void checkCodec(String msg,
+ Class expected, CompressionCodec actual) {
+ assertEquals(msg + " unexpected codec found",
+ expected.getName(),
+ actual.getClass().getName());
+ }
+
+ public static void testFinding() {
+ CompressionCodecFactory factory =
+ new CompressionCodecFactory(new Configuration());
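+ // A factory built from a default Configuration resolves the built-in .gz and
+ // .bz2 extensions but has no codec registered for .bar.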
+ CompressionCodec codec = factory.getCodec(new Path("/tmp/foo.bar"));
+ assertEquals("default factory foo codec", null, codec);
+ codec = factory.getCodec(new Path("/tmp/foo.gz"));
+ checkCodec("default factory for .gz", GzipCodec.class, codec);
+ codec = factory.getCodec(new Path("/tmp/foo.bz2"));
+ checkCodec("default factory for .bz2", BZip2Codec.class, codec);
+ factory = setClasses(new Class[0]);
+ codec = factory.getCodec(new Path("/tmp/foo.bar"));
+ assertEquals("empty codec bar codec", null, codec);
+ codec = factory.getCodec(new Path("/tmp/foo.gz"));
+ assertEquals("empty codec gz codec", null, codec);
+ codec = factory.getCodec(new Path("/tmp/foo.bz2"));
+ assertEquals("default factory for .bz2", null, codec);
+ factory = setClasses(new Class[]{BarCodec.class, FooCodec.class,
+ FooBarCodec.class});
+ codec = factory.getCodec(new Path("/tmp/.foo.bar.gz"));
+ assertEquals("full factory gz codec", null, codec);
+ codec = factory.getCodec(new Path("/tmp/foo.bz2"));
+ assertEquals("default factory for .bz2", null, codec);
+ codec = factory.getCodec(new Path("/tmp/foo.bar"));
+ checkCodec("full factory bar codec", BarCodec.class, codec);
+ codec = factory.getCodec(new Path("/tmp/foo/baz.foo.bar"));
+ checkCodec("full factory foo bar codec", FooBarCodec.class, codec);
+ codec = factory.getCodec(new Path("/tmp/foo.foo"));
+ checkCodec("full factory foo codec", FooCodec.class, codec);
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/io/retry/TestRetryProxy.java b/src/test/core/org/apache/hadoop/io/retry/TestRetryProxy.java
new file mode 100644
index 0000000000..c48e87b7dd
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/io/retry/TestRetryProxy.java
@@ -0,0 +1,170 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io.retry;
+
+import static org.apache.hadoop.io.retry.RetryPolicies.RETRY_FOREVER;
+import static org.apache.hadoop.io.retry.RetryPolicies.TRY_ONCE_DONT_FAIL;
+import static org.apache.hadoop.io.retry.RetryPolicies.TRY_ONCE_THEN_FAIL;
+import static org.apache.hadoop.io.retry.RetryPolicies.retryByException;
+import static org.apache.hadoop.io.retry.RetryPolicies.retryByRemoteException;
+import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumCountWithFixedSleep;
+import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumCountWithProportionalSleep;
+import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumTimeWithFixedSleep;
+import static org.apache.hadoop.io.retry.RetryPolicies.exponentialBackoffRetry;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.io.retry.UnreliableInterface.FatalException;
+import org.apache.hadoop.io.retry.UnreliableInterface.UnreliableException;
+import org.apache.hadoop.ipc.RemoteException;
+
+public class TestRetryProxy extends TestCase {
+
+ private UnreliableImplementation unreliableImpl;
+
+ @Override
+ protected void setUp() throws Exception {
+ unreliableImpl = new UnreliableImplementation();
+ }
+
+ public void testTryOnceThenFail() throws UnreliableException {
+ UnreliableInterface unreliable = (UnreliableInterface)
+ RetryProxy.create(UnreliableInterface.class, unreliableImpl, TRY_ONCE_THEN_FAIL);
+ unreliable.alwaysSucceeds();
+ try {
+ unreliable.failsOnceThenSucceeds();
+ fail("Should fail");
+ } catch (UnreliableException e) {
+ // expected
+ }
+ }
+
+ public void testTryOnceDontFail() throws UnreliableException {
+ UnreliableInterface unreliable = (UnreliableInterface)
+ RetryProxy.create(UnreliableInterface.class, unreliableImpl, TRY_ONCE_DONT_FAIL);
+ unreliable.alwaysSucceeds();
+ unreliable.failsOnceThenSucceeds();
+ try {
+ unreliable.failsOnceThenSucceedsWithReturnValue();
+ fail("Should fail");
+ } catch (UnreliableException e) {
+ // expected
+ }
+ }
+
+ public void testRetryForever() throws UnreliableException {
+ UnreliableInterface unreliable = (UnreliableInterface)
+ RetryProxy.create(UnreliableInterface.class, unreliableImpl, RETRY_FOREVER);
+ unreliable.alwaysSucceeds();
+ unreliable.failsOnceThenSucceeds();
+ unreliable.failsTenTimesThenSucceeds();
+ }
+
+ public void testRetryUpToMaximumCountWithFixedSleep() throws UnreliableException {
+ UnreliableInterface unreliable = (UnreliableInterface)
+ RetryProxy.create(UnreliableInterface.class, unreliableImpl,
+ retryUpToMaximumCountWithFixedSleep(8, 1, TimeUnit.NANOSECONDS));
+ unreliable.alwaysSucceeds();
+ unreliable.failsOnceThenSucceeds();
+ try {
+ unreliable.failsTenTimesThenSucceeds();
+ fail("Should fail");
+ } catch (UnreliableException e) {
+ // expected
+ }
+ }
+
+ public void testRetryUpToMaximumTimeWithFixedSleep() throws UnreliableException {
+ UnreliableInterface unreliable = (UnreliableInterface)
+ RetryProxy.create(UnreliableInterface.class, unreliableImpl,
+ retryUpToMaximumTimeWithFixedSleep(80, 10, TimeUnit.NANOSECONDS));
+ unreliable.alwaysSucceeds();
+ unreliable.failsOnceThenSucceeds();
+ try {
+ unreliable.failsTenTimesThenSucceeds();
+ fail("Should fail");
+ } catch (UnreliableException e) {
+ // expected
+ }
+ }
+
+ public void testRetryUpToMaximumCountWithProportionalSleep() throws UnreliableException {
+ UnreliableInterface unreliable = (UnreliableInterface)
+ RetryProxy.create(UnreliableInterface.class, unreliableImpl,
+ retryUpToMaximumCountWithProportionalSleep(8, 1, TimeUnit.NANOSECONDS));
+ unreliable.alwaysSucceeds();
+ unreliable.failsOnceThenSucceeds();
+ try {
+ unreliable.failsTenTimesThenSucceeds();
+ fail("Should fail");
+ } catch (UnreliableException e) {
+ // expected
+ }
+ }
+
+ public void testExponentialRetry() throws UnreliableException {
+ UnreliableInterface unreliable = (UnreliableInterface)
+ RetryProxy.create(UnreliableInterface.class, unreliableImpl,
+ exponentialBackoffRetry(5, 1L, TimeUnit.NANOSECONDS));
+ unreliable.alwaysSucceeds();
+ unreliable.failsOnceThenSucceeds();
+ try {
+ unreliable.failsTenTimesThenSucceeds();
+ fail("Should fail");
+ } catch (UnreliableException e) {
+ // expected
+ }
+ }
+
+ public void testRetryByException() throws UnreliableException {
+ Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
+ Collections.<Class<? extends Exception>, RetryPolicy>singletonMap(FatalException.class, TRY_ONCE_THEN_FAIL);
+
+ UnreliableInterface unreliable = (UnreliableInterface)
+ RetryProxy.create(UnreliableInterface.class, unreliableImpl,
+ retryByException(RETRY_FOREVER, exceptionToPolicyMap));
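+ // FatalException is mapped to TRY_ONCE_THEN_FAIL, so it surfaces immediately
+ // even though the default policy retries forever.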
+ unreliable.failsOnceThenSucceeds();
+ try {
+ unreliable.alwaysFailsWithFatalException();
+ fail("Should fail");
+ } catch (FatalException e) {
+ // expected
+ }
+ }
+
+ public void testRetryByRemoteException() throws UnreliableException {
+ Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
+ Collections.<Class<? extends Exception>, RetryPolicy>singletonMap(FatalException.class, TRY_ONCE_THEN_FAIL);
+
+ UnreliableInterface unreliable = (UnreliableInterface)
+ RetryProxy.create(UnreliableInterface.class, unreliableImpl,
+ retryByRemoteException(RETRY_FOREVER, exceptionToPolicyMap));
+ try {
+ unreliable.alwaysFailsWithRemoteFatalException();
+ fail("Should fail");
+ } catch (RemoteException e) {
+ // expected
+ }
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/io/retry/UnreliableImplementation.java b/src/test/core/org/apache/hadoop/io/retry/UnreliableImplementation.java
new file mode 100644
index 0000000000..5971ee7216
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/io/retry/UnreliableImplementation.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io.retry;
+
+import org.apache.hadoop.ipc.RemoteException;
+
+public class UnreliableImplementation implements UnreliableInterface {
+
+ private int failsOnceInvocationCount,
+ failsOnceWithValueInvocationCount,
+ failsTenTimesInvocationCount;
+
+ public void alwaysSucceeds() {
+ // do nothing
+ }
+
+ public void alwaysFailsWithFatalException() throws FatalException {
+ throw new FatalException();
+ }
+
+ public void alwaysFailsWithRemoteFatalException() throws RemoteException {
+ throw new RemoteException(FatalException.class.getName(), "Oops");
+ }
+
+ public void failsOnceThenSucceeds() throws UnreliableException {
+ if (failsOnceInvocationCount++ == 0) {
+ throw new UnreliableException();
+ }
+ }
+
+ public boolean failsOnceThenSucceedsWithReturnValue() throws UnreliableException {
+ if (failsOnceWithValueInvocationCount++ == 0) {
+ throw new UnreliableException();
+ }
+ return true;
+ }
+
+ public void failsTenTimesThenSucceeds() throws UnreliableException {
+ if (failsTenTimesInvocationCount++ < 10) {
+ throw new UnreliableException();
+ }
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/io/retry/UnreliableInterface.java b/src/test/core/org/apache/hadoop/io/retry/UnreliableInterface.java
new file mode 100644
index 0000000000..af4959151e
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/io/retry/UnreliableInterface.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io.retry;
+
+import org.apache.hadoop.ipc.RemoteException;
+
+public interface UnreliableInterface {
+
+ public static class UnreliableException extends Exception {
+ // no body
+ }
+
+ public static class FatalException extends UnreliableException {
+ // no body
+ }
+
+ void alwaysSucceeds() throws UnreliableException;
+
+ void alwaysFailsWithFatalException() throws FatalException;
+ void alwaysFailsWithRemoteFatalException() throws RemoteException;
+
+ void failsOnceThenSucceeds() throws UnreliableException;
+ boolean failsOnceThenSucceedsWithReturnValue() throws UnreliableException;
+
+ void failsTenTimesThenSucceeds() throws UnreliableException;
+}
diff --git a/src/test/core/org/apache/hadoop/io/serializer/TestWritableSerialization.java b/src/test/core/org/apache/hadoop/io/serializer/TestWritableSerialization.java
new file mode 100644
index 0000000000..6a55175324
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/io/serializer/TestWritableSerialization.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io.serializer;
+
+import static org.apache.hadoop.io.TestGenericWritable.CONF_TEST_KEY;
+import static org.apache.hadoop.io.TestGenericWritable.CONF_TEST_VALUE;
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.TestGenericWritable.Baz;
+import org.apache.hadoop.io.TestGenericWritable.FooGenericWritable;
+import org.apache.hadoop.util.GenericsUtil;
+
+public class TestWritableSerialization extends TestCase {
+
+ private static final Configuration conf = new Configuration();
+
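+ // Limit the serialization factory to WritableSerialization so the tests below
+ // exercise only that code path.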
+ static {
+ conf.set("io.serializations"
+ , "org.apache.hadoop.io.serializer.WritableSerialization");
+ }
+
+ public void testWritableSerialization() throws Exception {
+ Text before = new Text("test writable");
+ testSerialization(conf, before);
+ }
+
+
+ public void testWritableConfigurable() throws Exception {
+
+ //set the configuration parameter
+ conf.set(CONF_TEST_KEY, CONF_TEST_VALUE);
+
+ //reuse TestGenericWritable inner classes to test
+ //writables that also implement Configurable.
+ FooGenericWritable generic = new FooGenericWritable();
+ generic.setConf(conf);
+ Baz baz = new Baz();
+ generic.set(baz);
+ Baz result = testSerialization(conf, baz);
+ assertNotNull(result.getConf());
+ }
+
+ /**
+ * A utility that tests serialization/deserialization.
+ * @param <K> the class of the item
+ * @param conf configuration to use, "io.serializations" is read to
+ * determine the serialization
+ * @param before item to (de)serialize
+ * @return deserialized item
+ */
+ public static <K> K testSerialization(Configuration conf, K before)
+ throws Exception {
+
+ SerializationFactory factory = new SerializationFactory(conf);
+ Serializer<K> serializer
+ = factory.getSerializer(GenericsUtil.getClass(before));
+ Deserializer<K> deserializer
+ = factory.getDeserializer(GenericsUtil.getClass(before));
+
+ DataOutputBuffer out = new DataOutputBuffer();
+ serializer.open(out);
+ serializer.serialize(before);
+ serializer.close();
+
+ DataInputBuffer in = new DataInputBuffer();
+ in.reset(out.getData(), out.getLength());
+ deserializer.open(in);
+ K after = deserializer.deserialize(null);
+ deserializer.close();
+
+ assertEquals(before, after);
+ return after;
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/ipc/TestIPC.java b/src/test/core/org/apache/hadoop/ipc/TestIPC.java
new file mode 100644
index 0000000000..df5a155815
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/ipc/TestIPC.java
@@ -0,0 +1,243 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+import org.apache.commons.logging.*;
+
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.net.NetUtils;
+
+import java.util.Random;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+
+/** Unit tests for IPC. */
+public class TestIPC extends TestCase {
+ public static final Log LOG =
+ LogFactory.getLog(TestIPC.class);
+
+ final private static Configuration conf = new Configuration();
+ final static private int PING_INTERVAL = 1000;
+
+ static {
+ Client.setPingInterval(conf, PING_INTERVAL);
+ }
+ public TestIPC(String name) { super(name); }
+
+ private static final Random RANDOM = new Random();
+
+ private static final String ADDRESS = "0.0.0.0";
+
+ private static class TestServer extends Server {
+ private boolean sleep;
+
+ public TestServer(int handlerCount, boolean sleep)
+ throws IOException {
+ super(ADDRESS, 0, LongWritable.class, handlerCount, conf);
+ this.sleep = sleep;
+ }
+
+ @Override
+    public Writable call(Class<?> protocol, Writable param, long receiveTime)
+ throws IOException {
+ if (sleep) {
+ try {
+ Thread.sleep(RANDOM.nextInt(2*PING_INTERVAL)); // sleep a bit
+ } catch (InterruptedException e) {}
+ }
+ return param; // echo param as result
+ }
+ }
+
+ private static class SerialCaller extends Thread {
+ private Client client;
+ private InetSocketAddress server;
+ private int count;
+ private boolean failed;
+
+ public SerialCaller(Client client, InetSocketAddress server, int count) {
+ this.client = client;
+ this.server = server;
+ this.count = count;
+ }
+
+ public void run() {
+ for (int i = 0; i < count; i++) {
+ try {
+ LongWritable param = new LongWritable(RANDOM.nextLong());
+ LongWritable value =
+ (LongWritable)client.call(param, server);
+ if (!param.equals(value)) {
+ LOG.fatal("Call failed!");
+ failed = true;
+ break;
+ }
+ } catch (Exception e) {
+ LOG.fatal("Caught: " + StringUtils.stringifyException(e));
+ failed = true;
+ }
+ }
+ }
+ }
+
+ private static class ParallelCaller extends Thread {
+ private Client client;
+ private int count;
+ private InetSocketAddress[] addresses;
+ private boolean failed;
+
+ public ParallelCaller(Client client, InetSocketAddress[] addresses,
+ int count) {
+ this.client = client;
+ this.addresses = addresses;
+ this.count = count;
+ }
+
+ public void run() {
+ for (int i = 0; i < count; i++) {
+ try {
+ Writable[] params = new Writable[addresses.length];
+ for (int j = 0; j < addresses.length; j++)
+ params[j] = new LongWritable(RANDOM.nextLong());
+ Writable[] values = client.call(params, addresses);
+ for (int j = 0; j < addresses.length; j++) {
+ if (!params[j].equals(values[j])) {
+ LOG.fatal("Call failed!");
+ failed = true;
+ break;
+ }
+ }
+ } catch (Exception e) {
+ LOG.fatal("Caught: " + StringUtils.stringifyException(e));
+ failed = true;
+ }
+ }
+ }
+ }
+
+ public void testSerial() throws Exception {
+ testSerial(3, false, 2, 5, 100);
+ }
+
+ public void testSerial(int handlerCount, boolean handlerSleep,
+ int clientCount, int callerCount, int callCount)
+ throws Exception {
+ Server server = new TestServer(handlerCount, handlerSleep);
+ InetSocketAddress addr = NetUtils.getConnectAddress(server);
+ server.start();
+
+ Client[] clients = new Client[clientCount];
+ for (int i = 0; i < clientCount; i++) {
+ clients[i] = new Client(LongWritable.class, conf);
+ }
+
+ SerialCaller[] callers = new SerialCaller[callerCount];
+ for (int i = 0; i < callerCount; i++) {
+ callers[i] = new SerialCaller(clients[i%clientCount], addr, callCount);
+ callers[i].start();
+ }
+ for (int i = 0; i < callerCount; i++) {
+ callers[i].join();
+ assertFalse(callers[i].failed);
+ }
+ for (int i = 0; i < clientCount; i++) {
+ clients[i].stop();
+ }
+ server.stop();
+ }
+
+ public void testParallel() throws Exception {
+ testParallel(10, false, 2, 4, 2, 4, 100);
+ }
+
+ public void testParallel(int handlerCount, boolean handlerSleep,
+ int serverCount, int addressCount,
+ int clientCount, int callerCount, int callCount)
+ throws Exception {
+ Server[] servers = new Server[serverCount];
+ for (int i = 0; i < serverCount; i++) {
+ servers[i] = new TestServer(handlerCount, handlerSleep);
+ servers[i].start();
+ }
+
+ InetSocketAddress[] addresses = new InetSocketAddress[addressCount];
+ for (int i = 0; i < addressCount; i++) {
+ addresses[i] = NetUtils.getConnectAddress(servers[i%serverCount]);
+ }
+
+ Client[] clients = new Client[clientCount];
+ for (int i = 0; i < clientCount; i++) {
+ clients[i] = new Client(LongWritable.class, conf);
+ }
+
+ ParallelCaller[] callers = new ParallelCaller[callerCount];
+ for (int i = 0; i < callerCount; i++) {
+ callers[i] =
+ new ParallelCaller(clients[i%clientCount], addresses, callCount);
+ callers[i].start();
+ }
+ for (int i = 0; i < callerCount; i++) {
+ callers[i].join();
+ assertFalse(callers[i].failed);
+ }
+ for (int i = 0; i < clientCount; i++) {
+ clients[i].stop();
+ }
+ for (int i = 0; i < serverCount; i++) {
+ servers[i].stop();
+ }
+ }
+
+ public void testStandAloneClient() throws Exception {
+ testParallel(10, false, 2, 4, 2, 4, 100);
+ Client client = new Client(LongWritable.class, conf);
+ InetSocketAddress address = new InetSocketAddress("127.0.0.1", 10);
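+    // nothing is expected to listen on port 10, so the call below should fail with an IOException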
+ try {
+ client.call(new LongWritable(RANDOM.nextLong()),
+ address);
+ fail("Expected an exception to have been thrown");
+ } catch (IOException e) {
+ String message = e.getMessage();
+ String addressText = address.toString();
+ assertTrue("Did not find "+addressText+" in "+message,
+ message.contains(addressText));
+ Throwable cause=e.getCause();
+ assertNotNull("No nested exception in "+e,cause);
+ String causeText=cause.getMessage();
+ assertTrue("Did not find " + causeText + " in " + message,
+ message.contains(causeText));
+ }
+ }
+
+
+ public static void main(String[] args) throws Exception {
+
+ //new TestIPC("test").testSerial(5, false, 2, 10, 1000);
+
+ new TestIPC("test").testParallel(10, false, 2, 4, 2, 4, 1000);
+
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/ipc/TestIPCServerResponder.java b/src/test/core/org/apache/hadoop/ipc/TestIPCServerResponder.java
new file mode 100644
index 0000000000..2591da0143
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/ipc/TestIPCServerResponder.java
@@ -0,0 +1,150 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.net.NetUtils;
+
+/**
+ * This test provokes partial writes in the server, which is
+ * serving multiple clients.
+ */
+public class TestIPCServerResponder extends TestCase {
+
+ public static final Log LOG =
+ LogFactory.getLog(TestIPCServerResponder.class);
+
+ private static Configuration conf = new Configuration();
+
+ public TestIPCServerResponder(final String name) {
+ super(name);
+ }
+
+ private static final Random RANDOM = new Random();
+
+ private static final String ADDRESS = "0.0.0.0";
+
+ private static final int BYTE_COUNT = 1024;
+ private static final byte[] BYTES = new byte[BYTE_COUNT];
+ static {
+ for (int i = 0; i < BYTE_COUNT; i++)
+ BYTES[i] = (byte) ('a' + (i % 26));
+ }
+
+ private static class TestServer extends Server {
+
+ private boolean sleep;
+
+ public TestServer(final int handlerCount, final boolean sleep)
+ throws IOException {
+ super(ADDRESS, 0, BytesWritable.class, handlerCount, conf);
+ // Set the buffer size to half of the maximum parameter/result size
+ // to force the socket to block
+ this.setSocketSendBufSize(BYTE_COUNT / 2);
+ this.sleep = sleep;
+ }
+
+ @Override
+    public Writable call(Class<?> protocol, Writable param, long receiveTime)
+ throws IOException {
+ if (sleep) {
+ try {
+ Thread.sleep(RANDOM.nextInt(20)); // sleep a bit
+ } catch (InterruptedException e) {}
+ }
+ return param;
+ }
+ }
+
+ private static class Caller extends Thread {
+
+ private Client client;
+ private int count;
+ private InetSocketAddress address;
+ private boolean failed;
+
+ public Caller(final Client client, final InetSocketAddress address,
+ final int count) {
+ this.client = client;
+ this.address = address;
+ this.count = count;
+ }
+
+ @Override
+ public void run() {
+ for (int i = 0; i < count; i++) {
+ try {
+ int byteSize = RANDOM.nextInt(BYTE_COUNT);
+ byte[] bytes = new byte[byteSize];
+ System.arraycopy(BYTES, 0, bytes, 0, byteSize);
+ Writable param = new BytesWritable(bytes);
+ Writable value = client.call(param, address);
+ Thread.sleep(RANDOM.nextInt(20));
+ } catch (Exception e) {
+ LOG.fatal("Caught: " + e);
+ failed = true;
+ }
+ }
+ }
+ }
+
+ public void testServerResponder() throws Exception {
+ testServerResponder(10, true, 1, 10, 200);
+ }
+
+ public void testServerResponder(final int handlerCount,
+ final boolean handlerSleep,
+ final int clientCount,
+ final int callerCount,
+ final int callCount) throws Exception {
+ Server server = new TestServer(handlerCount, handlerSleep);
+ server.start();
+
+ InetSocketAddress address = NetUtils.getConnectAddress(server);
+ Client[] clients = new Client[clientCount];
+ for (int i = 0; i < clientCount; i++) {
+ clients[i] = new Client(BytesWritable.class, conf);
+ }
+
+ Caller[] callers = new Caller[callerCount];
+ for (int i = 0; i < callerCount; i++) {
+ callers[i] = new Caller(clients[i % clientCount], address, callCount);
+ callers[i].start();
+ }
+ for (int i = 0; i < callerCount; i++) {
+ callers[i].join();
+ assertFalse(callers[i].failed);
+ }
+ for (int i = 0; i < clientCount; i++) {
+ clients[i].stop();
+ }
+ server.stop();
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/ipc/TestRPC.java b/src/test/core/org/apache/hadoop/ipc/TestRPC.java
new file mode 100644
index 0000000000..d0db263cc1
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/ipc/TestRPC.java
@@ -0,0 +1,391 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+import java.io.IOException;
+import java.net.ConnectException;
+import java.net.InetSocketAddress;
+import java.lang.reflect.Method;
+
+import junit.framework.TestCase;
+
+import java.util.Arrays;
+
+import org.apache.commons.logging.*;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.UTF8;
+import org.apache.hadoop.io.Writable;
+
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.authorize.ConfiguredPolicy;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.security.authorize.Service;
+import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
+
+/** Unit tests for RPC. */
+public class TestRPC extends TestCase {
+ private static final String ADDRESS = "0.0.0.0";
+
+ public static final Log LOG =
+ LogFactory.getLog(TestRPC.class);
+
+ private static Configuration conf = new Configuration();
+
+ int datasize = 1024*100;
+ int numThreads = 50;
+
+ public TestRPC(String name) { super(name); }
+
+ public interface TestProtocol extends VersionedProtocol {
+ public static final long versionID = 1L;
+
+ void ping() throws IOException;
+ void slowPing(boolean shouldSlow) throws IOException;
+ String echo(String value) throws IOException;
+ String[] echo(String[] value) throws IOException;
+ Writable echo(Writable value) throws IOException;
+ int add(int v1, int v2) throws IOException;
+ int add(int[] values) throws IOException;
+ int error() throws IOException;
+ void testServerGet() throws IOException;
+ int[] exchange(int[] values) throws IOException;
+ }
+
+ public class TestImpl implements TestProtocol {
+ int fastPingCounter = 0;
+
+ public long getProtocolVersion(String protocol, long clientVersion) {
+ return TestProtocol.versionID;
+ }
+
+ public void ping() {}
+
+ public synchronized void slowPing(boolean shouldSlow) {
+ if (shouldSlow) {
+ while (fastPingCounter < 2) {
+ try {
+ wait(); // slow response until two fast pings happened
+ } catch (InterruptedException ignored) {}
+ }
+ fastPingCounter -= 2;
+ } else {
+ fastPingCounter++;
+ notify();
+ }
+ }
+
+ public String echo(String value) throws IOException { return value; }
+
+ public String[] echo(String[] values) throws IOException { return values; }
+
+ public Writable echo(Writable writable) {
+ return writable;
+ }
+ public int add(int v1, int v2) {
+ return v1 + v2;
+ }
+
+ public int add(int[] values) {
+ int sum = 0;
+ for (int i = 0; i < values.length; i++) {
+ sum += values[i];
+ }
+ return sum;
+ }
+
+ public int error() throws IOException {
+ throw new IOException("bobo");
+ }
+
+ public void testServerGet() throws IOException {
+ if (!(Server.get() instanceof RPC.Server)) {
+ throw new IOException("Server.get() failed");
+ }
+ }
+
+ public int[] exchange(int[] values) {
+ for (int i = 0; i < values.length; i++) {
+ values[i] = i;
+ }
+ return values;
+ }
+ }
+
+ //
+ // an object that does a bunch of transactions
+ //
+ static class Transactions implements Runnable {
+ int datasize;
+ TestProtocol proxy;
+
+ Transactions(TestProtocol proxy, int datasize) {
+ this.proxy = proxy;
+ this.datasize = datasize;
+ }
+
+    // do two RPCs that transfer data.
+ public void run() {
+ int[] indata = new int[datasize];
+ int[] outdata = null;
+ int val = 0;
+ try {
+ outdata = proxy.exchange(indata);
+ val = proxy.add(1,2);
+ } catch (IOException e) {
+ assertTrue("Exception from RPC exchange() " + e, false);
+ }
+ assertEquals(indata.length, outdata.length);
+ assertEquals(val, 3);
+ for (int i = 0; i < outdata.length; i++) {
+ assertEquals(outdata[i], i);
+ }
+ }
+ }
+
+ //
+ // A class that does an RPC but does not read its response.
+ //
+ static class SlowRPC implements Runnable {
+ private TestProtocol proxy;
+ private volatile boolean done;
+
+ SlowRPC(TestProtocol proxy) {
+ this.proxy = proxy;
+ done = false;
+ }
+
+ boolean isDone() {
+ return done;
+ }
+
+ public void run() {
+ try {
+ proxy.slowPing(true); // this would hang until two fast pings happened
+ done = true;
+ } catch (IOException e) {
+ assertTrue("SlowRPC ping exception " + e, false);
+ }
+ }
+ }
+
+ public void testSlowRpc() throws Exception {
+ System.out.println("Testing Slow RPC");
+ // create a server with two handlers
+ Server server = RPC.getServer(new TestImpl(), ADDRESS, 0, 2, false, conf);
+ TestProtocol proxy = null;
+
+ try {
+ server.start();
+
+ InetSocketAddress addr = NetUtils.getConnectAddress(server);
+
+ // create a client
+ proxy = (TestProtocol)RPC.getProxy(
+ TestProtocol.class, TestProtocol.versionID, addr, conf);
+
+ SlowRPC slowrpc = new SlowRPC(proxy);
+ Thread thread = new Thread(slowrpc, "SlowRPC");
+ thread.start(); // send a slow RPC, which won't return until two fast pings
+ assertTrue("Slow RPC should not have finished1.", !slowrpc.isDone());
+
+ proxy.slowPing(false); // first fast ping
+
+ // verify that the first RPC is still stuck
+ assertTrue("Slow RPC should not have finished2.", !slowrpc.isDone());
+
+ proxy.slowPing(false); // second fast ping
+
+ // Now the slow ping should be able to be executed
+ while (!slowrpc.isDone()) {
+ System.out.println("Waiting for slow RPC to get done.");
+ try {
+ Thread.sleep(1000);
+ } catch (InterruptedException e) {}
+ }
+ } finally {
+ server.stop();
+ if (proxy != null) {
+ RPC.stopProxy(proxy);
+ }
+ System.out.println("Down slow rpc testing");
+ }
+ }
+
+
+ public void testCalls() throws Exception {
+ Server server = RPC.getServer(new TestImpl(), ADDRESS, 0, conf);
+ TestProtocol proxy = null;
+ try {
+ server.start();
+
+ InetSocketAddress addr = NetUtils.getConnectAddress(server);
+ proxy = (TestProtocol)RPC.getProxy(
+ TestProtocol.class, TestProtocol.versionID, addr, conf);
+
+ proxy.ping();
+
+ String stringResult = proxy.echo("foo");
+ assertEquals(stringResult, "foo");
+
+ stringResult = proxy.echo((String)null);
+ assertEquals(stringResult, null);
+
+ String[] stringResults = proxy.echo(new String[]{"foo","bar"});
+ assertTrue(Arrays.equals(stringResults, new String[]{"foo","bar"}));
+
+ stringResults = proxy.echo((String[])null);
+ assertTrue(Arrays.equals(stringResults, null));
+
+ UTF8 utf8Result = (UTF8)proxy.echo(new UTF8("hello world"));
+ assertEquals(utf8Result, new UTF8("hello world"));
+
+ utf8Result = (UTF8)proxy.echo((UTF8)null);
+ assertEquals(utf8Result, null);
+
+ int intResult = proxy.add(1, 2);
+ assertEquals(intResult, 3);
+
+ intResult = proxy.add(new int[] {1, 2});
+ assertEquals(intResult, 3);
+
+ boolean caught = false;
+ try {
+ proxy.error();
+ } catch (IOException e) {
+ LOG.debug("Caught " + e);
+ caught = true;
+ }
+ assertTrue(caught);
+
+ proxy.testServerGet();
+
+ // create multiple threads and make them do large data transfers
+ System.out.println("Starting multi-threaded RPC test...");
+ server.setSocketSendBufSize(1024);
+ Thread threadId[] = new Thread[numThreads];
+ for (int i = 0; i < numThreads; i++) {
+ Transactions trans = new Transactions(proxy, datasize);
+ threadId[i] = new Thread(trans, "TransactionThread-" + i);
+ threadId[i].start();
+ }
+
+ // wait for all transactions to get over
+ System.out.println("Waiting for all threads to finish RPCs...");
+ for (int i = 0; i < numThreads; i++) {
+ try {
+ threadId[i].join();
+ } catch (InterruptedException e) {
+ i--; // retry
+ }
+ }
+
+ // try some multi-calls
+ Method echo =
+ TestProtocol.class.getMethod("echo", new Class[] { String.class });
+ String[] strings = (String[])RPC.call(echo, new String[][]{{"a"},{"b"}},
+ new InetSocketAddress[] {addr, addr}, conf);
+ assertTrue(Arrays.equals(strings, new String[]{"a","b"}));
+
+ Method ping = TestProtocol.class.getMethod("ping", new Class[] {});
+ Object[] voids = (Object[])RPC.call(ping, new Object[][]{{},{}},
+ new InetSocketAddress[] {addr, addr}, conf);
+ assertEquals(voids, null);
+ } finally {
+ server.stop();
+ if(proxy!=null) RPC.stopProxy(proxy);
+ }
+ }
+
+ public void testStandaloneClient() throws IOException {
+ try {
+ RPC.waitForProxy(TestProtocol.class,
+ TestProtocol.versionID, new InetSocketAddress(ADDRESS, 20), conf, 15000L);
+ fail("We should not have reached here");
+ } catch (ConnectException ioe) {
+ //this is what we expected
+ }
+ }
+
+ private static final String ACL_CONFIG = "test.protocol.acl";
+
+ private static class TestPolicyProvider extends PolicyProvider {
+
+ @Override
+ public Service[] getServices() {
+ return new Service[] { new Service(ACL_CONFIG, TestProtocol.class) };
+ }
+
+ }
+
+ private void doRPCs(Configuration conf, boolean expectFailure) throws Exception {
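+    // install a policy provider so the ACL in test.protocol.acl governs access to TestProtocol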
+ SecurityUtil.setPolicy(new ConfiguredPolicy(conf, new TestPolicyProvider()));
+
+ Server server = RPC.getServer(new TestImpl(), ADDRESS, 0, 5, true, conf);
+
+ TestProtocol proxy = null;
+
+ server.start();
+
+ InetSocketAddress addr = NetUtils.getConnectAddress(server);
+
+ try {
+ proxy = (TestProtocol)RPC.getProxy(
+ TestProtocol.class, TestProtocol.versionID, addr, conf);
+ proxy.ping();
+
+ if (expectFailure) {
+ fail("Expect RPC.getProxy to fail with AuthorizationException!");
+ }
+ } catch (RemoteException e) {
+ if (expectFailure) {
+ assertTrue(e.unwrapRemoteException() instanceof AuthorizationException);
+ } else {
+ throw e;
+ }
+ } finally {
+ server.stop();
+ if (proxy != null) {
+ RPC.stopProxy(proxy);
+ }
+ }
+ }
+
+ public void testAuthorization() throws Exception {
+ Configuration conf = new Configuration();
+ conf.setBoolean(
+ ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, true);
+
+ // Expect to succeed
+ conf.set(ACL_CONFIG, "*");
+ doRPCs(conf, false);
+
+ // Reset authorization to expect failure
+ conf.set(ACL_CONFIG, "invalid invalid");
+ doRPCs(conf, true);
+ }
+
+ public static void main(String[] args) throws Exception {
+
+ new TestRPC("test").testCalls();
+
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/log/TestLogLevel.java b/src/test/core/org/apache/hadoop/log/TestLogLevel.java
new file mode 100644
index 0000000000..f2443c04d9
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/log/TestLogLevel.java
@@ -0,0 +1,78 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.log;
+
+import java.io.*;
+import java.net.*;
+
+import org.apache.hadoop.http.HttpServer;
+
+import junit.framework.TestCase;
+import org.apache.commons.logging.*;
+import org.apache.commons.logging.impl.*;
+import org.apache.log4j.*;
+
+public class TestLogLevel extends TestCase {
+ static final PrintStream out = System.out;
+
+ public void testDynamicLogLevel() throws Exception {
+ String logName = TestLogLevel.class.getName();
+ Log testlog = LogFactory.getLog(logName);
+
+ //only test Log4JLogger
+ if (testlog instanceof Log4JLogger) {
+ Logger log = ((Log4JLogger)testlog).getLogger();
+ log.debug("log.debug1");
+ log.info("log.info1");
+ log.error("log.error1");
+ assertTrue(!Level.ERROR.equals(log.getEffectiveLevel()));
+
+ HttpServer server = new HttpServer("..", "localhost", 22222, true);
+ server.start();
+ int port = server.getPort();
+
+ //servlet
+ URL url = new URL("http://localhost:" + port
+ + "/logLevel?log=" + logName + "&level=" + Level.ERROR);
+ out.println("*** Connecting to " + url);
+ URLConnection connection = url.openConnection();
+ connection.connect();
+
+ BufferedReader in = new BufferedReader(new InputStreamReader(
+ connection.getInputStream()));
+ for(String line; (line = in.readLine()) != null; out.println(line));
+ in.close();
+
+ log.debug("log.debug2");
+ log.info("log.info2");
+ log.error("log.error2");
+ assertTrue(Level.ERROR.equals(log.getEffectiveLevel()));
+
+ //command line
+ String[] args = {"-setlevel", "localhost:"+port, logName,""+Level.DEBUG};
+ LogLevel.main(args);
+ log.debug("log.debug3");
+ log.info("log.info3");
+ log.error("log.error3");
+ assertTrue(Level.DEBUG.equals(log.getEffectiveLevel()));
+ }
+ else {
+ out.println(testlog.getClass() + " not tested.");
+ }
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/metrics/TestMetricsServlet.java b/src/test/core/org/apache/hadoop/metrics/TestMetricsServlet.java
new file mode 100644
index 0000000000..8d5cfc9a55
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/metrics/TestMetricsServlet.java
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.metrics.MetricsServlet.TagsMetricsPair;
+import org.apache.hadoop.metrics.spi.NoEmitMetricsContext;
+import org.apache.hadoop.metrics.spi.OutputRecord;
+import org.mortbay.util.ajax.JSON;
+
+public class TestMetricsServlet extends TestCase {
+ MetricsContext nc1;
+ MetricsContext nc2;
+ // List containing nc1 and nc2.
+  List<MetricsContext> contexts;
+ OutputRecord outputRecord;
+
+ /**
+ * Initializes, for testing, two NoEmitMetricsContext's, and adds one value
+ * to the first of them.
+ */
+ public void setUp() throws IOException {
+ nc1 = new NoEmitMetricsContext();
+ nc1.init("test1", ContextFactory.getFactory());
+ nc2 = new NoEmitMetricsContext();
+ nc2.init("test2", ContextFactory.getFactory());
+    contexts = new ArrayList<MetricsContext>();
+ contexts.add(nc1);
+ contexts.add(nc2);
+
+ MetricsRecord r = nc1.createRecord("testRecord");
+
+ r.setTag("testTag1", "testTagValue1");
+ r.setTag("testTag2", "testTagValue2");
+ r.setMetric("testMetric1", 1);
+ r.setMetric("testMetric2", 33);
+ r.update();
+
+    Map<String, Collection<OutputRecord>> m = nc1.getAllRecords();
+ assertEquals(1, m.size());
+ assertEquals(1, m.values().size());
+    Collection<OutputRecord> outputRecords = m.values().iterator().next();
+ assertEquals(1, outputRecords.size());
+ outputRecord = outputRecords.iterator().next();
+ }
+
+
+
+ public void testTagsMetricsPair() throws IOException {
+ TagsMetricsPair pair = new TagsMetricsPair(outputRecord.getTagsCopy(),
+ outputRecord.getMetricsCopy());
+ String s = JSON.toString(pair);
+ assertEquals(
+ "[{\"testTag1\":\"testTagValue1\",\"testTag2\":\"testTagValue2\"},"+
+ "{\"testMetric1\":1,\"testMetric2\":33}]", s);
+ }
+
+ public void testGetMap() throws IOException {
+ MetricsServlet servlet = new MetricsServlet();
+    Map<String, Map<String, List<TagsMetricsPair>>> m = servlet.makeMap(contexts);
+ assertEquals("Map missing contexts", 2, m.size());
+ assertTrue(m.containsKey("test1"));
+
+    Map<String, List<TagsMetricsPair>> m2 = m.get("test1");
+
+ assertEquals("Missing records", 1, m2.size());
+ assertTrue(m2.containsKey("testRecord"));
+ assertEquals("Wrong number of tags-values pairs.", 1, m2.get("testRecord").size());
+ }
+
+ public void testPrintMap() throws IOException {
+ StringWriter sw = new StringWriter();
+ PrintWriter out = new PrintWriter(sw);
+ MetricsServlet servlet = new MetricsServlet();
+ servlet.printMap(out, servlet.makeMap(contexts));
+
+ String EXPECTED = "" +
+ "test1\n" +
+ " testRecord\n" +
+ " {testTag1=testTagValue1,testTag2=testTagValue2}:\n" +
+ " testMetric1=1\n" +
+ " testMetric2=33\n" +
+ "test2\n";
+ assertEquals(EXPECTED, sw.toString());
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/metrics/spi/TestOutputRecord.java b/src/test/core/org/apache/hadoop/metrics/spi/TestOutputRecord.java
new file mode 100644
index 0000000000..02e94a9f1b
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/metrics/spi/TestOutputRecord.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics.spi;
+
+import org.apache.hadoop.metrics.spi.AbstractMetricsContext.MetricMap;
+import org.apache.hadoop.metrics.spi.AbstractMetricsContext.TagMap;
+
+import junit.framework.TestCase;
+
+public class TestOutputRecord extends TestCase {
+ public void testCopy() {
+ TagMap tags = new TagMap();
+ tags.put("tagkey", "tagval");
+ MetricMap metrics = new MetricMap();
+ metrics.put("metrickey", 123.4);
+ OutputRecord r = new OutputRecord(tags, metrics);
+
+ assertEquals(tags, r.getTagsCopy());
+ assertNotSame(tags, r.getTagsCopy());
+ assertEquals(metrics, r.getMetricsCopy());
+ assertNotSame(metrics, r.getMetricsCopy());
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/net/StaticMapping.java b/src/test/core/org/apache/hadoop/net/StaticMapping.java
new file mode 100644
index 0000000000..c3923ed951
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/net/StaticMapping.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.net;
+
+import java.util.*;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+
+/**
+ * Implements the {@link DNSToSwitchMapping} via static mappings. Used
+ * in testcases that simulate racks.
+ *
+ */
+public class StaticMapping extends Configured implements DNSToSwitchMapping {
+ public void setconf(Configuration conf) {
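+    // mappings are comma-separated "host=rack" entries read from hadoop.configured.node.mapping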
+ String[] mappings = conf.getStrings("hadoop.configured.node.mapping");
+ if (mappings != null) {
+ for (int i = 0; i < mappings.length; i++) {
+ String str = mappings[i];
+ String host = str.substring(0, str.indexOf('='));
+ String rack = str.substring(str.indexOf('=') + 1);
+ addNodeToRack(host, rack);
+ }
+ }
+ }
+ /* Only one instance per JVM */
+  private static Map<String, String> nameToRackMap = new HashMap<String, String>();
+
+ static synchronized public void addNodeToRack(String name, String rackId) {
+ nameToRackMap.put(name, rackId);
+ }
+  public List<String> resolve(List<String> names) {
+    List<String> m = new ArrayList<String>();
+ synchronized (nameToRackMap) {
+ for (String name : names) {
+ String rackId;
+ if ((rackId = nameToRackMap.get(name)) != null) {
+ m.add(rackId);
+ } else {
+ m.add(NetworkTopology.DEFAULT_RACK);
+ }
+ }
+ return m;
+ }
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/net/TestDNS.java b/src/test/core/org/apache/hadoop/net/TestDNS.java
new file mode 100644
index 0000000000..5825ecf8c6
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/net/TestDNS.java
@@ -0,0 +1,150 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.hadoop.net;
+
+import junit.framework.TestCase;
+
+import java.net.UnknownHostException;
+import java.net.InetAddress;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import javax.naming.NameNotFoundException;
+
+/**
+ *
+ */
+public class TestDNS extends TestCase {
+
+ private static final Log LOG = LogFactory.getLog(TestDNS.class);
+ private static final String DEFAULT = "default";
+
+ /**
+ * Constructs a test case with the given name.
+ *
+ * @param name test name
+ */
+ public TestDNS(String name) {
+ super(name);
+ }
+
+ /**
+ * Test that asking for the default hostname works
+ * @throws Exception if hostname lookups fail */
+ public void testGetLocalHost() throws Exception {
+ String hostname = DNS.getDefaultHost(DEFAULT);
+ assertNotNull(hostname);
+ }
+
+ /**
+   * Test that repeated calls to get the local host are fairly fast, and
+ * hence that caching is being used
+ * @throws Exception if hostname lookups fail
+ */
+ public void testGetLocalHostIsFast() throws Exception {
+ String hostname = DNS.getDefaultHost(DEFAULT);
+ assertNotNull(hostname);
+ long t1 = System.currentTimeMillis();
+ String hostname2 = DNS.getDefaultHost(DEFAULT);
+ long t2 = System.currentTimeMillis();
+ String hostname3 = DNS.getDefaultHost(DEFAULT);
+ long t3 = System.currentTimeMillis();
+ assertEquals(hostname3, hostname2);
+ assertEquals(hostname2, hostname);
+ long interval2 = t3 - t2;
+ assertTrue(
+ "It is taking to long to determine the local host -caching is not working",
+ interval2 < 20000);
+ }
+
+ /**
+ * Test that our local IP address is not null
+ * @throws Exception if something went wrong
+ */
+ public void testLocalHostHasAnAddress() throws Exception {
+ assertNotNull(getLocalIPAddr());
+ }
+
+ private InetAddress getLocalIPAddr() throws UnknownHostException {
+ String hostname = DNS.getDefaultHost(DEFAULT);
+ InetAddress localhost = InetAddress.getByName(hostname);
+ return localhost;
+ }
+
+ /**
+   * Test that passing a null interface name
+   * fails with a NullPointerException
+ * @throws Exception if something went wrong
+ */
+ public void testNullInterface() throws Exception {
+ try {
+ String host = DNS.getDefaultHost(null);
+ fail("Expected a NullPointerException, got " + host);
+ } catch (NullPointerException expected) {
+ //this is expected
+ }
+ }
+
+ /**
+ * Get the IP addresses of an unknown interface, expect to get something
+ * back
+ * @throws Exception if something went wrong
+ */
+ public void testIPsOfUnknownInterface() throws Exception {
+ String[] ips = DNS.getIPs("name-of-an-unknown-interface");
+ assertNotNull(ips);
+ assertTrue(ips.length > 0);
+ }
+
+ /**
+ * TestCase: get our local address and reverse look it up
+ * @throws Exception if that fails
+ */
+ public void testRDNS() throws Exception {
+ InetAddress localhost = getLocalIPAddr();
+ try {
+ String s = DNS.reverseDns(localhost, null);
+ LOG.info("Local revers DNS hostname is " + s);
+ } catch (NameNotFoundException e) {
+ if (!localhost.isLinkLocalAddress() || localhost.isLoopbackAddress()) {
+ //these addresses probably won't work with rDNS anyway, unless someone
+ //has unusual entries in their DNS server mapping 1.0.0.127 to localhost
+ LOG.info("Reverse DNS failing as due to incomplete networking", e);
+ LOG.info("Address is " + localhost
+ + " Loopback=" + localhost.isLoopbackAddress()
+ + " Linklocal=" + localhost.isLinkLocalAddress());
+ }
+
+ }
+ }
+
+ /**
+ * Test that the name "localhost" resolves to something.
+ *
+ * If this fails, your machine's network is in a mess, go edit /etc/hosts
+ * @throws Exception for any problems
+ */
+ public void testLocalhostResolves() throws Exception {
+ InetAddress localhost = InetAddress.getByName("localhost");
+ assertNotNull("localhost is null", localhost);
+ LOG.info("Localhost IPAddr is " + localhost.toString());
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/net/TestScriptBasedMapping.java b/src/test/core/org/apache/hadoop/net/TestScriptBasedMapping.java
new file mode 100644
index 0000000000..144dbaa0e3
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/net/TestScriptBasedMapping.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.net;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+
+import junit.framework.TestCase;
+
+public class TestScriptBasedMapping extends TestCase {
+
+ public void testNoArgsMeansNoResult() {
+ ScriptBasedMapping mapping = new ScriptBasedMapping();
+
+ Configuration conf = new Configuration();
+ conf.setInt(ScriptBasedMapping.SCRIPT_ARG_COUNT_KEY,
+ ScriptBasedMapping.MIN_ALLOWABLE_ARGS - 1);
+ conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY, "any-filename");
+
+ mapping.setConf(conf);
+
+    List<String> names = new ArrayList<String>();
+ names.add("some.machine.name");
+ names.add("other.machine.name");
+
+    List<String> result = mapping.resolve(names);
+ assertNull(result);
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/net/TestSocketIOWithTimeout.java b/src/test/core/org/apache/hadoop/net/TestSocketIOWithTimeout.java
new file mode 100644
index 0000000000..53f320917c
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/net/TestSocketIOWithTimeout.java
@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.net;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.SocketTimeoutException;
+import java.nio.channels.Pipe;
+import java.util.Arrays;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import junit.framework.TestCase;
+
+/**
+ * This tests timeouts from SocketInputStream and
+ * SocketOutputStream using pipes.
+ *
+ * Normal read and write using these streams are tested by pretty much
+ * every DFS unit test.
+ */
+public class TestSocketIOWithTimeout extends TestCase {
+
+ static Log LOG = LogFactory.getLog(TestSocketIOWithTimeout.class);
+
+ private static int TIMEOUT = 1*1000;
+ private static String TEST_STRING = "1234567890";
+
+ private void doIO(InputStream in, OutputStream out) throws IOException {
+ /* Keep on writing or reading until we get SocketTimeoutException.
+     * It expects this exception to occur within 200 millis of TIMEOUT.
+ */
+ byte buf[] = new byte[4192];
+
+ while (true) {
+ long start = System.currentTimeMillis();
+ try {
+ if (in != null) {
+ in.read(buf);
+ } else {
+ out.write(buf);
+ }
+ } catch (SocketTimeoutException e) {
+ long diff = System.currentTimeMillis() - start;
+ LOG.info("Got SocketTimeoutException as expected after " +
+ diff + " millis : " + e.getMessage());
+ assertTrue(Math.abs(TIMEOUT - diff) <= 200);
+ break;
+ }
+ }
+ }
+
+ /**
+ * Just reads one byte from the input stream.
+ */
+ static class ReadRunnable implements Runnable {
+ private InputStream in;
+
+ public ReadRunnable(InputStream in) {
+ this.in = in;
+ }
+ public void run() {
+ try {
+ in.read();
+ } catch (IOException e) {
+ LOG.info("Got expection while reading as expected : " +
+ e.getMessage());
+ return;
+ }
+ assertTrue(false);
+ }
+ }
+
+ public void testSocketIOWithTimeout() throws IOException {
+
+ // first open pipe:
+ Pipe pipe = Pipe.open();
+ Pipe.SourceChannel source = pipe.source();
+ Pipe.SinkChannel sink = pipe.sink();
+
+ try {
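+      // wrap the pipe ends in SocketInputStream/SocketOutputStream so reads and writes honor TIMEOUT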
+ InputStream in = new SocketInputStream(source, TIMEOUT);
+ OutputStream out = new SocketOutputStream(sink, TIMEOUT);
+
+ byte[] writeBytes = TEST_STRING.getBytes();
+ byte[] readBytes = new byte[writeBytes.length];
+
+ out.write(writeBytes);
+ doIO(null, out);
+
+ in.read(readBytes);
+ assertTrue(Arrays.equals(writeBytes, readBytes));
+ doIO(in, null);
+
+ /*
+ * Verify that it handles interrupted threads properly.
+ * Use a large timeout and expect the thread to return quickly.
+ */
+ in = new SocketInputStream(source, 0);
+ Thread thread = new Thread(new ReadRunnable(in));
+ thread.start();
+
+ try {
+ Thread.sleep(1000);
+ } catch (InterruptedException ignored) {}
+
+ thread.interrupt();
+
+ try {
+ thread.join();
+ } catch (InterruptedException e) {
+ throw new IOException("Unexpected InterruptedException : " + e);
+ }
+
+ //make sure the channels are still open
+ assertTrue(source.isOpen());
+ assertTrue(sink.isOpen());
+
+ out.close();
+ assertFalse(sink.isOpen());
+
+      // the sink is now closed, so reading from the source should return -1 (EOF)
+ assertEquals(-1, in.read());
+
+ // make sure close() closes the underlying channel.
+ in.close();
+ assertFalse(source.isOpen());
+
+ } finally {
+ if (source != null) {
+ source.close();
+ }
+ if (sink != null) {
+ sink.close();
+ }
+ }
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/record/FromCpp.java b/src/test/core/org/apache/hadoop/record/FromCpp.java
new file mode 100644
index 0000000000..2cd2271f43
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/record/FromCpp.java
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.record;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.TreeMap;
+import junit.framework.*;
+
+/**
+ */
+public class FromCpp extends TestCase {
+
+ public FromCpp(String testName) {
+ super(testName);
+ }
+
+ protected void setUp() throws Exception {
+ }
+
+ protected void tearDown() throws Exception {
+ }
+
+ public void testBinary() {
+ File tmpfile;
+ try {
+ tmpfile = new File("/temp/hadooptmp.dat");
+ RecRecord1 r1 = new RecRecord1();
+ r1.setBoolVal(true);
+ r1.setByteVal((byte)0x66);
+ r1.setFloatVal(3.145F);
+ r1.setDoubleVal(1.5234);
+ r1.setIntVal(4567);
+ r1.setLongVal(0x5a5a5a5a5a5aL);
+ r1.setStringVal("random text");
+ r1.setBufferVal(new Buffer());
+      r1.setVectorVal(new ArrayList<String>());
+      r1.setMapVal(new TreeMap<String,String>());
+ FileInputStream istream = new FileInputStream(tmpfile);
+ BinaryRecordInput in = new BinaryRecordInput(istream);
+ RecRecord1 r2 = new RecRecord1();
+ r2.deserialize(in, "");
+ istream.close();
+ assertTrue(r1.equals(r2));
+ } catch (IOException ex) {
+ ex.printStackTrace();
+ }
+ }
+
+ public void testCsv() {
+ File tmpfile;
+ try {
+ tmpfile = new File("/temp/hadooptmp.txt");
+ RecRecord1 r1 = new RecRecord1();
+ r1.setBoolVal(true);
+ r1.setByteVal((byte)0x66);
+ r1.setFloatVal(3.145F);
+ r1.setDoubleVal(1.5234);
+ r1.setIntVal(4567);
+ r1.setLongVal(0x5a5a5a5a5a5aL);
+ r1.setStringVal("random text");
+ r1.setBufferVal(new Buffer());
+      r1.setVectorVal(new ArrayList<String>());
+      r1.setMapVal(new TreeMap<String,String>());
+ FileInputStream istream = new FileInputStream(tmpfile);
+ CsvRecordInput in = new CsvRecordInput(istream);
+ RecRecord1 r2 = new RecRecord1();
+ r2.deserialize(in, "");
+ istream.close();
+ assertTrue(r1.equals(r2));
+ } catch (IOException ex) {
+ ex.printStackTrace();
+ }
+ }
+
+ public void testXml() {
+ File tmpfile;
+ try {
+ tmpfile = new File("/temp/hadooptmp.xml");
+ RecRecord1 r1 = new RecRecord1();
+ r1.setBoolVal(true);
+ r1.setByteVal((byte)0x66);
+ r1.setFloatVal(3.145F);
+ r1.setDoubleVal(1.5234);
+ r1.setIntVal(4567);
+ r1.setLongVal(0x5a5a5a5a5a5aL);
+ r1.setStringVal("random text");
+ r1.setBufferVal(new Buffer());
+      r1.setVectorVal(new ArrayList<String>());
+      r1.setMapVal(new TreeMap<String,String>());
+ FileInputStream istream = new FileInputStream(tmpfile);
+ XmlRecordInput in = new XmlRecordInput(istream);
+ RecRecord1 r2 = new RecRecord1();
+ r2.deserialize(in, "");
+ istream.close();
+ assertTrue(r1.equals(r2));
+ } catch (IOException ex) {
+ ex.printStackTrace();
+ }
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/record/RecordBench.java b/src/test/core/org/apache/hadoop/record/RecordBench.java
new file mode 100644
index 0000000000..1cba75ed80
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/record/RecordBench.java
@@ -0,0 +1,313 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.record;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.lang.reflect.Array;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.Random;
+
+/**
+ * Benchmark for various types of serializations
+ */
+public class RecordBench {
+
+ private static class Times {
+ long init;
+ long serialize;
+ long deserialize;
+ long write;
+ long readFields;
+ };
+
+ private static final long SEED = 0xDEADBEEFL;
+ private static final Random rand = new Random();
+
+  /** Do not allow creating a new instance of RecordBench */
+ private RecordBench() {}
+
+ private static void initBuffers(Record[] buffers) {
+ final int BUFLEN = 32;
+ for (int idx = 0; idx < buffers.length; idx++) {
+ buffers[idx] = new RecBuffer();
+ int buflen = rand.nextInt(BUFLEN);
+ byte[] bytes = new byte[buflen];
+ rand.nextBytes(bytes);
+ ((RecBuffer)buffers[idx]).setData(new Buffer(bytes));
+ }
+ }
+
+ private static void initStrings(Record[] strings) {
+ final int STRLEN = 32;
+ for (int idx = 0; idx < strings.length; idx++) {
+ strings[idx] = new RecString();
+ int strlen = rand.nextInt(STRLEN);
+ StringBuilder sb = new StringBuilder(strlen);
+ for (int ich = 0; ich < strlen; ich++) {
+ int cpt = 0;
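+        // keep sampling until a valid Unicode code point is found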
+ while (true) {
+ cpt = rand.nextInt(0x10FFFF+1);
+ if (Utils.isValidCodePoint(cpt)) {
+ break;
+ }
+ }
+ sb.appendCodePoint(cpt);
+ }
+ ((RecString)strings[idx]).setData(sb.toString());
+ }
+ }
+
+ private static void initInts(Record[] ints) {
+ for (int idx = 0; idx < ints.length; idx++) {
+ ints[idx] = new RecInt();
+ ((RecInt)ints[idx]).setData(rand.nextInt());
+ }
+ }
+
+ private static Record[] makeArray(String type, int numRecords, Times times) {
+ Method init = null;
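+    // look up the matching initBuffers/initStrings/initInts method by reflection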
+ try {
+ init = RecordBench.class.getDeclaredMethod("init"+
+ toCamelCase(type) + "s",
+ new Class[] {Record[].class});
+ } catch (NoSuchMethodException ex) {
+ throw new RuntimeException(ex);
+ }
+
+ Record[] records = new Record[numRecords];
+ times.init = System.nanoTime();
+ try {
+ init.invoke(null, new Object[]{records});
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ times.init = System.nanoTime() - times.init;
+ return records;
+ }
+
+ private static void runBinaryBench(String type, int numRecords, Times times)
+ throws IOException {
+ Record[] records = makeArray(type, numRecords, times);
+ ByteArrayOutputStream bout = new ByteArrayOutputStream();
+ BinaryRecordOutput rout = new BinaryRecordOutput(bout);
+ DataOutputStream dout = new DataOutputStream(bout);
+
+ for(int idx = 0; idx < numRecords; idx++) {
+ records[idx].serialize(rout);
+ }
+ bout.reset();
+
+ times.serialize = System.nanoTime();
+ for(int idx = 0; idx < numRecords; idx++) {
+ records[idx].serialize(rout);
+ }
+ times.serialize = System.nanoTime() - times.serialize;
+
+ byte[] serialized = bout.toByteArray();
+ ByteArrayInputStream bin = new ByteArrayInputStream(serialized);
+ BinaryRecordInput rin = new BinaryRecordInput(bin);
+
+ times.deserialize = System.nanoTime();
+ for(int idx = 0; idx < numRecords; idx++) {
+ records[idx].deserialize(rin);
+ }
+ times.deserialize = System.nanoTime() - times.deserialize;
+
+ bout.reset();
+
+ times.write = System.nanoTime();
+ for(int idx = 0; idx < numRecords; idx++) {
+ records[idx].write(dout);
+ }
+ times.write = System.nanoTime() - times.write;
+
+ bin.reset();
+ DataInputStream din = new DataInputStream(bin);
+
+ times.readFields = System.nanoTime();
+ for(int idx = 0; idx < numRecords; idx++) {
+ records[idx].readFields(din);
+ }
+ times.readFields = System.nanoTime() - times.readFields;
+ }
+
+ private static void runCsvBench(String type, int numRecords, Times times)
+ throws IOException {
+ Record[] records = makeArray(type, numRecords, times);
+ ByteArrayOutputStream bout = new ByteArrayOutputStream();
+ CsvRecordOutput rout = new CsvRecordOutput(bout);
+
+ for(int idx = 0; idx < numRecords; idx++) {
+ records[idx].serialize(rout);
+ }
+ bout.reset();
+
+ times.serialize = System.nanoTime();
+ for(int idx = 0; idx < numRecords; idx++) {
+ records[idx].serialize(rout);
+ }
+ times.serialize = System.nanoTime() - times.serialize;
+
+ byte[] serialized = bout.toByteArray();
+ ByteArrayInputStream bin = new ByteArrayInputStream(serialized);
+ CsvRecordInput rin = new CsvRecordInput(bin);
+
+ times.deserialize = System.nanoTime();
+ for(int idx = 0; idx < numRecords; idx++) {
+ records[idx].deserialize(rin);
+ }
+ times.deserialize = System.nanoTime() - times.deserialize;
+ }
+
+ private static void runXmlBench(String type, int numRecords, Times times)
+ throws IOException {
+ Record[] records = makeArray(type, numRecords, times);
+ ByteArrayOutputStream bout = new ByteArrayOutputStream();
+ XmlRecordOutput rout = new XmlRecordOutput(bout);
+
+ for(int idx = 0; idx < numRecords; idx++) {
+ records[idx].serialize(rout);
+ }
+ bout.reset();
+
+ bout.write("\n".getBytes());
+
+ times.serialize = System.nanoTime();
+ for(int idx = 0; idx < numRecords; idx++) {
+ records[idx].serialize(rout);
+ }
+ times.serialize = System.nanoTime() - times.serialize;
+
+ bout.write("\n".getBytes());
+
+ byte[] serialized = bout.toByteArray();
+ ByteArrayInputStream bin = new ByteArrayInputStream(serialized);
+
+ times.deserialize = System.nanoTime();
+ XmlRecordInput rin = new XmlRecordInput(bin);
+ for(int idx = 0; idx < numRecords; idx++) {
+ records[idx].deserialize(rin);
+ }
+ times.deserialize = System.nanoTime() - times.deserialize;
+ }
+
+ private static void printTimes(String type,
+ String format,
+ int numRecords,
+ Times times) {
+ System.out.println("Type: " + type + " Format: " + format +
+ " #Records: "+numRecords);
+ if (times.init != 0) {
+ System.out.println("Initialization Time (Per record) : "+
+ times.init/numRecords + " Nanoseconds");
+ }
+
+ if (times.serialize != 0) {
+ System.out.println("Serialization Time (Per Record) : "+
+ times.serialize/numRecords + " Nanoseconds");
+ }
+
+ if (times.deserialize != 0) {
+ System.out.println("Deserialization Time (Per Record) : "+
+ times.deserialize/numRecords + " Nanoseconds");
+ }
+
+ if (times.write != 0) {
+ System.out.println("Write Time (Per Record) : "+
+ times.write/numRecords + " Nanoseconds");
+ }
+
+ if (times.readFields != 0) {
+ System.out.println("ReadFields Time (Per Record) : "+
+ times.readFields/numRecords + " Nanoseconds");
+ }
+
+ System.out.println();
+ }
+
+ private static String toCamelCase(String inp) {
+ char firstChar = inp.charAt(0);
+ if (Character.isLowerCase(firstChar)) {
+ return ""+Character.toUpperCase(firstChar) + inp.substring(1);
+ }
+ return inp;
+ }
+
+ private static void exitOnError() {
+ String usage = "RecordBench {buffer|string|int}"+
+ " {binary|csv|xml} ";
+ System.out.println(usage);
+ System.exit(1);
+ }
+
+ /**
+ * @param args the command line arguments
+ */
+ public static void main(String[] args) throws IOException {
+ String version = "RecordBench v0.1";
+ System.out.println(version+"\n");
+
+ if (args.length != 3) {
+ exitOnError();
+ }
+
+ String typeName = args[0];
+ String format = args[1];
+ int numRecords = Integer.decode(args[2]).intValue();
+
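+ // Resolve the benchmark method by reflection from the format argument,
+ // e.g. "csv" -> runCsvBench(String, int, Times).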
+ Method bench = null;
+ try {
+ bench = RecordBench.class.getDeclaredMethod("run"+
+ toCamelCase(format) + "Bench",
+ new Class[] {String.class, Integer.TYPE, Times.class});
+ } catch (NoSuchMethodException ex) {
+ ex.printStackTrace();
+ exitOnError();
+ }
+
+ if (numRecords < 0) {
+ exitOnError();
+ }
+
+ // dry run
+ rand.setSeed(SEED);
+ Times times = new Times();
+ try {
+ bench.invoke(null, new Object[] {typeName, numRecords, times});
+ } catch (Exception ex) {
+ ex.printStackTrace();
+ System.exit(1);
+ }
+
+ // timed run
+ rand.setSeed(SEED);
+ try {
+ bench.invoke(null, new Object[] {typeName, numRecords, times});
+ } catch (Exception ex) {
+ ex.printStackTrace();
+ System.exit(1);
+ }
+ printTimes(typeName, format, numRecords, times);
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/record/TestBuffer.java b/src/test/core/org/apache/hadoop/record/TestBuffer.java
new file mode 100644
index 0000000000..3012fa6ff4
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/record/TestBuffer.java
@@ -0,0 +1,124 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.record;
+
+import junit.framework.*;
+
+/**
+ * A Unit test for Record I/O Buffer class
+ */
+public class TestBuffer extends TestCase {
+
+ public TestBuffer(String testName) {
+ super(testName);
+ }
+
+ /**
+ * Test of set method, of class org.apache.hadoop.record.Buffer.
+ */
+ public void testSet() {
+ final byte[] bytes = new byte[10];
+ final Buffer instance = new Buffer();
+
+ instance.set(bytes);
+
+ assertEquals("set failed", bytes, instance.get());
+ }
+
+ /**
+ * Test of copy method, of class org.apache.hadoop.record.Buffer.
+ */
+ public void testCopy() {
+ final byte[] bytes = new byte[10];
+ final int offset = 6;
+ final int length = 3;
+ for (int idx = 0; idx < 10; idx ++) {
+ bytes[idx] = (byte) idx;
+ }
+ final Buffer instance = new Buffer();
+
+ instance.copy(bytes, offset, length);
+
+ assertEquals("copy failed", 3, instance.getCapacity());
+ assertEquals("copy failed", 3, instance.get().length);
+ for (int idx = 0; idx < 3; idx++) {
+ assertEquals("Buffer content corrupted", idx+6, instance.get()[idx]);
+ }
+ }
+
+ /**
+ * Test of getCount method, of class org.apache.hadoop.record.Buffer.
+ */
+ public void testGetCount() {
+ final Buffer instance = new Buffer();
+
+ final int expResult = 0;
+ final int result = instance.getCount();
+ assertEquals("getSize failed", expResult, result);
+ }
+
+ /**
+ * Test of getCapacity method, of class org.apache.hadoop.record.Buffer.
+ */
+ public void testGetCapacity() {
+ final Buffer instance = new Buffer();
+
+ final int expResult = 0;
+ final int result = instance.getCapacity();
+ assertEquals("getCapacity failed", expResult, result);
+
+ instance.setCapacity(100);
+ assertEquals("setCapacity failed", 100, instance.getCapacity());
+ }
+
+ /**
+ * Test of truncate method, of class org.apache.hadoop.record.Buffer.
+ */
+ public void testTruncate() {
+ final Buffer instance = new Buffer();
+ instance.setCapacity(100);
+ assertEquals("setCapacity failed", 100, instance.getCapacity());
+
+ instance.truncate();
+ assertEquals("truncate failed", 0, instance.getCapacity());
+ }
+
+ /**
+ * Test of append method, of class org.apache.hadoop.record.Buffer.
+ */
+ public void testAppend() {
+ final byte[] bytes = new byte[100];
+ final int offset = 0;
+ final int length = 100;
+ for (int idx = 0; idx < 100; idx++) {
+ bytes[idx] = (byte) (100-idx);
+ }
+
+ final Buffer instance = new Buffer();
+
+ instance.append(bytes, offset, length);
+
+ assertEquals("Buffer size mismatch", 100, instance.getCount());
+
+ for (int idx = 0; idx < 100; idx++) {
+ assertEquals("Buffer contents corrupted", 100-idx, instance.get()[idx]);
+ }
+
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/record/TestRecordIO.java b/src/test/core/org/apache/hadoop/record/TestRecordIO.java
new file mode 100644
index 0000000000..163ec1b00b
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/record/TestRecordIO.java
@@ -0,0 +1,199 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.record;
+
+import java.io.IOException;
+import junit.framework.*;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.util.ArrayList;
+import java.util.TreeMap;
+
+/**
+ */
+public class TestRecordIO extends TestCase {
+
+ public TestRecordIO(String testName) {
+ super(testName);
+ }
+
+ protected void setUp() throws Exception {
+ }
+
+ protected void tearDown() throws Exception {
+ }
+
+ public void testBinary() {
+ File tmpfile;
+ try {
+ tmpfile = File.createTempFile("hadooprec", ".dat");
+ FileOutputStream ostream = new FileOutputStream(tmpfile);
+ BinaryRecordOutput out = new BinaryRecordOutput(ostream);
+ RecRecord1 r1 = new RecRecord1();
+ r1.setBoolVal(true);
+ r1.setByteVal((byte)0x66);
+ r1.setFloatVal(3.145F);
+ r1.setDoubleVal(1.5234);
+ r1.setIntVal(-4567);
+ r1.setLongVal(-2367L);
+ r1.setStringVal("random text");
+ r1.setBufferVal(new Buffer());
+ r1.setVectorVal(new ArrayList());
+ r1.setMapVal(new TreeMap());
+ RecRecord0 r0 = new RecRecord0();
+ r0.setStringVal("other random text");
+ r1.setRecordVal(r0);
+ r1.serialize(out, "");
+ ostream.close();
+ FileInputStream istream = new FileInputStream(tmpfile);
+ BinaryRecordInput in = new BinaryRecordInput(istream);
+ RecRecord1 r2 = new RecRecord1();
+ r2.deserialize(in, "");
+ istream.close();
+ tmpfile.delete();
+ assertTrue("Serialized and deserialized records do not match.", r1.equals(r2));
+ } catch (IOException ex) {
+ ex.printStackTrace();
+ }
+ }
+
+ public void testCsv() {
+ File tmpfile;
+ try {
+ tmpfile = File.createTempFile("hadooprec", ".txt");
+ FileOutputStream ostream = new FileOutputStream(tmpfile);
+ CsvRecordOutput out = new CsvRecordOutput(ostream);
+ RecRecord1 r1 = new RecRecord1();
+ r1.setBoolVal(true);
+ r1.setByteVal((byte)0x66);
+ r1.setFloatVal(3.145F);
+ r1.setDoubleVal(1.5234);
+ r1.setIntVal(4567);
+ r1.setLongVal(0x5a5a5a5a5a5aL);
+ r1.setStringVal("random text");
+ r1.setBufferVal(new Buffer());
+ r1.setVectorVal(new ArrayList());
+ r1.setMapVal(new TreeMap());
+ RecRecord0 r0 = new RecRecord0();
+ r0.setStringVal("other random text");
+ r1.setRecordVal(r0);
+ r1.serialize(out, "");
+ ostream.close();
+ FileInputStream istream = new FileInputStream(tmpfile);
+ CsvRecordInput in = new CsvRecordInput(istream);
+ RecRecord1 r2 = new RecRecord1();
+ r2.deserialize(in, "");
+ istream.close();
+ tmpfile.delete();
+ assertTrue("Serialized and deserialized records do not match.", r1.equals(r2));
+
+ } catch (IOException ex) {
+ ex.printStackTrace();
+ }
+ }
+
+ public void testToString() {
+ try {
+ RecRecord1 r1 = new RecRecord1();
+ r1.setBoolVal(true);
+ r1.setByteVal((byte)0x66);
+ r1.setFloatVal(3.145F);
+ r1.setDoubleVal(1.5234);
+ r1.setIntVal(4567);
+ r1.setLongVal(0x5a5a5a5a5a5aL);
+ r1.setStringVal("random text");
+ byte[] barr = new byte[256];
+ for (int idx = 0; idx < 256; idx++) {
+ barr[idx] = (byte) idx;
+ }
+ r1.setBufferVal(new Buffer(barr));
+ r1.setVectorVal(new ArrayList());
+ r1.setMapVal(new TreeMap());
+ RecRecord0 r0 = new RecRecord0();
+ r0.setStringVal("other random text");
+ r1.setRecordVal(r0);
+ System.err.println("Illustrating toString bug"+r1.toString());
+ System.err.println("Illustrating toString bug"+r1.toString());
+ } catch (Throwable ex) {
+ assertTrue("Record.toString cannot be invoked twice in succession."+
+ "This bug has been fixed in the latest version.", false);
+ }
+ }
+
+ public void testXml() {
+ File tmpfile;
+ try {
+ tmpfile = File.createTempFile("hadooprec", ".xml");
+ FileOutputStream ostream = new FileOutputStream(tmpfile);
+ XmlRecordOutput out = new XmlRecordOutput(ostream);
+ RecRecord1 r1 = new RecRecord1();
+ r1.setBoolVal(true);
+ r1.setByteVal((byte)0x66);
+ r1.setFloatVal(3.145F);
+ r1.setDoubleVal(1.5234);
+ r1.setIntVal(4567);
+ r1.setLongVal(0x5a5a5a5a5a5aL);
+ r1.setStringVal("ran\002dom < %text<&more\uffff");
+ r1.setBufferVal(new Buffer());
+ r1.setVectorVal(new ArrayList());
+ r1.setMapVal(new TreeMap());
+ RecRecord0 r0 = new RecRecord0();
+ r0.setStringVal("other %rando\007m & >&more text");
+ r1.setRecordVal(r0);
+ r1.serialize(out, "");
+ ostream.close();
+ FileInputStream istream = new FileInputStream(tmpfile);
+ XmlRecordInput in = new XmlRecordInput(istream);
+ RecRecord1 r2 = new RecRecord1();
+ r2.deserialize(in, "");
+ istream.close();
+ tmpfile.delete();
+ assertTrue("Serialized and deserialized records do not match.", r1.equals(r2));
+ } catch (IOException ex) {
+ ex.printStackTrace();
+ }
+ }
+
+ public void testCloneable() {
+ RecRecord1 r1 = new RecRecord1();
+ r1.setBoolVal(true);
+ r1.setByteVal((byte)0x66);
+ r1.setFloatVal(3.145F);
+ r1.setDoubleVal(1.5234);
+ r1.setIntVal(-4567);
+ r1.setLongVal(-2367L);
+ r1.setStringVal("random text");
+ r1.setBufferVal(new Buffer());
+ r1.setVectorVal(new ArrayList());
+ r1.setMapVal(new TreeMap());
+ RecRecord0 r0 = new RecRecord0();
+ r0.setStringVal("other random text");
+ r1.setRecordVal(r0);
+ try {
+ RecRecord1 r2 = (RecRecord1) r1.clone();
+ assertTrue("Cloneable semantics violated. r1==r2", r1 != r2);
+ assertTrue("Cloneable semantics violated. r1.getClass() != r2.getClass()",
+ r1.getClass() == r2.getClass());
+ assertTrue("Cloneable semantics violated. !r2.equals(r1)", r2.equals(r1));
+ } catch (final CloneNotSupportedException ex) {
+ ex.printStackTrace();
+ }
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/record/TestRecordVersioning.java b/src/test/core/org/apache/hadoop/record/TestRecordVersioning.java
new file mode 100644
index 0000000000..129ba2ced8
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/record/TestRecordVersioning.java
@@ -0,0 +1,239 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.record;
+
+import java.io.IOException;
+import junit.framework.*;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.util.ArrayList;
+import java.util.TreeMap;
+import org.apache.hadoop.record.meta.RecordTypeInfo;
+
+/**
+ */
+public class TestRecordVersioning extends TestCase {
+
+ public TestRecordVersioning(String testName) {
+ super(testName);
+ }
+
+ protected void setUp() throws Exception {
+ }
+
+ protected void tearDown() throws Exception {
+ }
+
+ /*
+ * basic versioning
+ * write out a record and its type info, read it back using its typeinfo
+ */
+ public void testBasic() {
+ File tmpfile, tmpRTIfile;
+ try {
+ tmpfile = File.createTempFile("hadooprec", ".dat");
+ tmpRTIfile = File.createTempFile("hadooprti", ".dat");
+ FileOutputStream ostream = new FileOutputStream(tmpfile);
+ BinaryRecordOutput out = new BinaryRecordOutput(ostream);
+ FileOutputStream oRTIstream = new FileOutputStream(tmpRTIfile);
+ BinaryRecordOutput outRTI = new BinaryRecordOutput(oRTIstream);
+ RecRecord1 r1 = new RecRecord1();
+ r1.setBoolVal(true);
+ r1.setByteVal((byte)0x66);
+ r1.setFloatVal(3.145F);
+ r1.setDoubleVal(1.5234);
+ r1.setIntVal(-4567);
+ r1.setLongVal(-2367L);
+ r1.setStringVal("random text");
+ r1.setBufferVal(new Buffer());
+ r1.setVectorVal(new ArrayList());
+ r1.setMapVal(new TreeMap());
+ RecRecord0 r0 = new RecRecord0();
+ r0.setStringVal("other random text");
+ r1.setRecordVal(r0);
+ r1.serialize(out, "");
+ ostream.close();
+ // write out the type info
+ RecRecord1.getTypeInfo().serialize(outRTI);
+ oRTIstream.close();
+
+ // read
+ FileInputStream istream = new FileInputStream(tmpfile);
+ BinaryRecordInput in = new BinaryRecordInput(istream);
+ FileInputStream iRTIstream = new FileInputStream(tmpRTIfile);
+ BinaryRecordInput inRTI = new BinaryRecordInput(iRTIstream);
+ RecordTypeInfo rti = new RecordTypeInfo();
+ rti.deserialize(inRTI);
+ iRTIstream.close();
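+ // Register the writer's type info as a filter so the reader can reconcile field differences when deserializing.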
+ RecRecord1.setTypeFilter(rti);
+ RecRecord1 r2 = new RecRecord1();
+ r2.deserialize(in, "");
+ istream.close();
+ tmpfile.delete();
+ tmpRTIfile.delete();
+ assertTrue("Serialized and deserialized versioned records do not match.", r1.equals(r2));
+ } catch (IOException ex) {
+ ex.printStackTrace();
+ }
+ }
+
+ /*
+ * versioning
+ * write out a record and its type info, read back a similar record using the written record's typeinfo
+ */
+ public void testVersioning() {
+ File tmpfile, tmpRTIfile;
+ try {
+ tmpfile = File.createTempFile("hadooprec", ".dat");
+ tmpRTIfile = File.createTempFile("hadooprti", ".dat");
+ FileOutputStream ostream = new FileOutputStream(tmpfile);
+ BinaryRecordOutput out = new BinaryRecordOutput(ostream);
+ FileOutputStream oRTIstream = new FileOutputStream(tmpRTIfile);
+ BinaryRecordOutput outRTI = new BinaryRecordOutput(oRTIstream);
+
+ // we create an array of records to write
+ ArrayList<RecRecordOld> recsWrite = new ArrayList<RecRecordOld>();
+ int i, j, k, l;
+ for (i=0; i<5; i++) {
+ RecRecordOld s1Rec = new RecRecordOld();
+
+ s1Rec.setName("This is record s1: " + i);
+
+ ArrayList<Long> iA = new ArrayList<Long>();
+ for (j=0; j<3; j++) {
+ iA.add(new Long(i+j));
+ }
+ s1Rec.setIvec(iA);
+
+ ArrayList<ArrayList<RecRecord0>> ssVec = new ArrayList<ArrayList<RecRecord0>>();
+ for (j=0; j<2; j++) {
+ ArrayList<RecRecord0> sVec = new ArrayList<RecRecord0>();
+ for (k=0; k<3; k++) {
+ RecRecord0 sRec = new RecRecord0("This is record s: ("+j+": "+k+")");
+ sVec.add(sRec);
+ }
+ ssVec.add(sVec);
+ }
+ s1Rec.setSvec(ssVec);
+
+ s1Rec.setInner(new RecRecord0("This is record s: " + i));
+
+ ArrayList<ArrayList<ArrayList<String>>> aaaVec = new ArrayList<ArrayList<ArrayList<String>>>();
+ for (l=0; l<2; l++) {
+ ArrayList<ArrayList<String>> aaVec = new ArrayList<ArrayList<String>>();
+ for (j=0; j<2; j++) {
+ ArrayList<String> aVec = new ArrayList<String>();
+ for (k=0; k<3; k++) {
+ aVec.add(new String("THis is a nested string: (" + l + ": " + j + ": " + k + ")"));
+ }
+ aaVec.add(aVec);
+ }
+ aaaVec.add(aaVec);
+ }
+ s1Rec.setStrvec(aaaVec);
+
+ s1Rec.setI1(100+i);
+
+ java.util.TreeMap<Byte, String> map1 = new java.util.TreeMap<Byte, String>();
+ map1.put(new Byte("23"), "23");
+ map1.put(new Byte("11"), "11");
+ s1Rec.setMap1(map1);
+
+ java.util.TreeMap<Integer, Long> m1 = new java.util.TreeMap<Integer, Long>();
+ java.util.TreeMap<Integer, Long> m2 = new java.util.TreeMap<Integer, Long>();
+ m1.put(new Integer(5), 5L);
+ m1.put(new Integer(10), 10L);
+ m2.put(new Integer(15), 15L);
+ m2.put(new Integer(20), 20L);
+ java.util.ArrayList<java.util.TreeMap<Integer, Long>> vm1 = new java.util.ArrayList<java.util.TreeMap<Integer, Long>>();
+ vm1.add(m1);
+ vm1.add(m2);
+ s1Rec.setMvec1(vm1);
+ java.util.ArrayList<java.util.TreeMap<Integer, Long>> vm2 = new java.util.ArrayList<java.util.TreeMap<Integer, Long>>();
+ vm2.add(m1);
+ s1Rec.setMvec2(vm2);
+
+ // add to our list
+ recsWrite.add(s1Rec);
+ }
+
+ // write out to file
+ for (RecRecordOld rec: recsWrite) {
+ rec.serialize(out);
+ }
+ ostream.close();
+ // write out the type info
+ RecRecordOld.getTypeInfo().serialize(outRTI);
+ oRTIstream.close();
+
+ // read
+ FileInputStream istream = new FileInputStream(tmpfile);
+ BinaryRecordInput in = new BinaryRecordInput(istream);
+ FileInputStream iRTIstream = new FileInputStream(tmpRTIfile);
+ BinaryRecordInput inRTI = new BinaryRecordInput(iRTIstream);
+ RecordTypeInfo rti = new RecordTypeInfo();
+
+ // read type info
+ rti.deserialize(inRTI);
+ iRTIstream.close();
+ RecRecordNew.setTypeFilter(rti);
+
+ // read records
+ ArrayList<RecRecordNew> recsRead = new ArrayList<RecRecordNew>();
+ for (i=0; i> ss2Vec = s2In.getStrvec().get(j);
+ ArrayList> ss1Vec = s1Out.getStrvec().get(j);
+ for (k=0; k s2Vec = ss2Vec.get(k);
+ ArrayList s1Vec = ss1Vec.get(k);
+ for (l=0; l());
+ r1.setMapVal(new TreeMap());
+ r1.serialize(out, "");
+ ostream.close();
+ } catch (IOException ex) {
+ ex.printStackTrace();
+ }
+ }
+
+ public void testCsv() {
+ File tmpfile;
+ try {
+ tmpfile = new File("/tmp/hadooptemp.txt");
+ FileOutputStream ostream = new FileOutputStream(tmpfile);
+ CsvRecordOutput out = new CsvRecordOutput(ostream);
+ RecRecord1 r1 = new RecRecord1();
+ r1.setBoolVal(true);
+ r1.setByteVal((byte)0x66);
+ r1.setFloatVal(3.145F);
+ r1.setDoubleVal(1.5234);
+ r1.setIntVal(4567);
+ r1.setLongVal(0x5a5a5a5a5a5aL);
+ r1.setStringVal("random text");
+ r1.setBufferVal(new Buffer());
+ r1.setVectorVal(new ArrayList());
+ r1.setMapVal(new TreeMap());
+ r1.serialize(out, "");
+ ostream.close();
+ } catch (IOException ex) {
+ ex.printStackTrace();
+ }
+ }
+
+ public void testXml() {
+ File tmpfile;
+ try {
+ tmpfile = new File("/tmp/hadooptemp.xml");
+ FileOutputStream ostream = new FileOutputStream(tmpfile);
+ XmlRecordOutput out = new XmlRecordOutput(ostream);
+ RecRecord1 r1 = new RecRecord1();
+ r1.setBoolVal(true);
+ r1.setByteVal((byte)0x66);
+ r1.setFloatVal(3.145F);
+ r1.setDoubleVal(1.5234);
+ r1.setIntVal(4567);
+ r1.setLongVal(0x5a5a5a5a5a5aL);
+ r1.setStringVal("random text");
+ r1.setBufferVal(new Buffer());
+ r1.setVectorVal(new ArrayList());
+ r1.setMapVal(new TreeMap());
+ r1.serialize(out, "");
+ ostream.close();
+ } catch (IOException ex) {
+ ex.printStackTrace();
+ }
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/security/TestAccessControlList.java b/src/test/core/org/apache/hadoop/security/TestAccessControlList.java
new file mode 100644
index 0000000000..57c5abf875
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/security/TestAccessControlList.java
@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security;
+
+import java.util.Iterator;
+import java.util.Set;
+
+import org.apache.hadoop.security.SecurityUtil.AccessControlList;
+
+import junit.framework.TestCase;
+
+public class TestAccessControlList extends TestCase {
+
+ public void testWildCardAccessControlList() throws Exception {
+ AccessControlList acl;
+
+ acl = new AccessControlList("*");
+ assertTrue(acl.allAllowed());
+
+ acl = new AccessControlList(" * ");
+ assertTrue(acl.allAllowed());
+
+ acl = new AccessControlList(" *");
+ assertTrue(acl.allAllowed());
+
+ acl = new AccessControlList("* ");
+ assertTrue(acl.allAllowed());
+ }
+
+ public void testAccessControlList() throws Exception {
+ AccessControlList acl;
+ Set<String> users;
+ Set<String> groups;
+
+ acl = new AccessControlList("drwho tardis");
+ users = acl.getUsers();
+ assertEquals(users.size(), 1);
+ assertEquals(users.iterator().next(), "drwho");
+ groups = acl.getGroups();
+ assertEquals(groups.size(), 1);
+ assertEquals(groups.iterator().next(), "tardis");
+
+ acl = new AccessControlList("drwho");
+ users = acl.getUsers();
+ assertEquals(users.size(), 1);
+ assertEquals(users.iterator().next(), "drwho");
+ groups = acl.getGroups();
+ assertEquals(groups.size(), 0);
+
+ acl = new AccessControlList("drwho ");
+ users = acl.getUsers();
+ assertEquals(users.size(), 1);
+ assertEquals(users.iterator().next(), "drwho");
+ groups = acl.getGroups();
+ assertEquals(groups.size(), 0);
+
+ acl = new AccessControlList(" tardis");
+ users = acl.getUsers();
+ assertEquals(users.size(), 0);
+ groups = acl.getGroups();
+ assertEquals(groups.size(), 1);
+ assertEquals(groups.iterator().next(), "tardis");
+
+ Iterator<String> iter;
+ acl = new AccessControlList("drwho,joe tardis,users");
+ users = acl.getUsers();
+ assertEquals(users.size(), 2);
+ iter = users.iterator();
+ assertEquals(iter.next(), "drwho");
+ assertEquals(iter.next(), "joe");
+ groups = acl.getGroups();
+ assertEquals(groups.size(), 2);
+ iter = groups.iterator();
+ assertEquals(iter.next(), "tardis");
+ assertEquals(iter.next(), "users");
+
+ acl = new AccessControlList("drwho,joe tardis, users");
+ users = acl.getUsers();
+ assertEquals(users.size(), 2);
+ iter = users.iterator();
+ assertEquals(iter.next(), "drwho");
+ assertEquals(iter.next(), "joe");
+ groups = acl.getGroups();
+ assertEquals(groups.size(), 2);
+ iter = groups.iterator();
+ assertEquals(iter.next(), "tardis");
+ assertEquals(iter.next(), "users");
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/security/TestAccessToken.java b/src/test/core/org/apache/hadoop/security/TestAccessToken.java
new file mode 100644
index 0000000000..cd3cc4c482
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/security/TestAccessToken.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.security;
+
+import java.util.EnumSet;
+
+import org.apache.hadoop.io.TestWritable;
+
+import junit.framework.TestCase;
+
+/** Unit tests for access tokens */
+public class TestAccessToken extends TestCase {
+ long accessKeyUpdateInterval = 10 * 60 * 1000; // 10 mins
+ long accessTokenLifetime = 2 * 60 * 1000; // 2 mins
+ long blockID1 = 0L;
+ long blockID2 = 10L;
+ long blockID3 = -108L;
+
+ /** test Writable */
+ public void testWritable() throws Exception {
+ TestWritable.testWritable(ExportedAccessKeys.DUMMY_KEYS);
+ AccessTokenHandler handler = new AccessTokenHandler(true,
+ accessKeyUpdateInterval, accessTokenLifetime);
+ ExportedAccessKeys keys = handler.exportKeys();
+ TestWritable.testWritable(keys);
+ TestWritable.testWritable(AccessToken.DUMMY_TOKEN);
+ AccessToken token = handler.generateToken(blockID3, EnumSet
+ .allOf(AccessTokenHandler.AccessMode.class));
+ TestWritable.testWritable(token);
+ }
+
+ private void tokenGenerationAndVerification(AccessTokenHandler master,
+ AccessTokenHandler slave) throws Exception {
+ // single-mode tokens
+ for (AccessTokenHandler.AccessMode mode : AccessTokenHandler.AccessMode
+ .values()) {
+ // generated by master
+ AccessToken token1 = master.generateToken(blockID1, EnumSet.of(mode));
+ assertTrue(master.checkAccess(token1, null, blockID1, mode));
+ assertTrue(slave.checkAccess(token1, null, blockID1, mode));
+ // generated by slave
+ AccessToken token2 = slave.generateToken(blockID2, EnumSet.of(mode));
+ assertTrue(master.checkAccess(token2, null, blockID2, mode));
+ assertTrue(slave.checkAccess(token2, null, blockID2, mode));
+ }
+ // multi-mode tokens
+ AccessToken mtoken = master.generateToken(blockID3, EnumSet
+ .allOf(AccessTokenHandler.AccessMode.class));
+ for (AccessTokenHandler.AccessMode mode : AccessTokenHandler.AccessMode
+ .values()) {
+ assertTrue(master.checkAccess(mtoken, null, blockID3, mode));
+ assertTrue(slave.checkAccess(mtoken, null, blockID3, mode));
+ }
+ }
+
+ /** test access key and token handling */
+ public void testAccessTokenHandler() throws Exception {
+ AccessTokenHandler masterHandler = new AccessTokenHandler(true,
+ accessKeyUpdateInterval, accessTokenLifetime);
+ AccessTokenHandler slaveHandler = new AccessTokenHandler(false,
+ accessKeyUpdateInterval, accessTokenLifetime);
+ ExportedAccessKeys keys = masterHandler.exportKeys();
+ slaveHandler.setKeys(keys);
+ tokenGenerationAndVerification(masterHandler, slaveHandler);
+ // key updating
+ masterHandler.updateKeys();
+ tokenGenerationAndVerification(masterHandler, slaveHandler);
+ keys = masterHandler.exportKeys();
+ slaveHandler.setKeys(keys);
+ tokenGenerationAndVerification(masterHandler, slaveHandler);
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/security/TestUnixUserGroupInformation.java b/src/test/core/org/apache/hadoop/security/TestUnixUserGroupInformation.java
new file mode 100644
index 0000000000..51880c2d1f
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/security/TestUnixUserGroupInformation.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.security;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.TestWritable;
+
+import junit.framework.TestCase;
+
+/** Unit tests for UnixUserGroupInformation */
+public class TestUnixUserGroupInformation extends TestCase {
+ final private static String USER_NAME = "user1";
+ final private static String GROUP1_NAME = "group1";
+ final private static String GROUP2_NAME = "group2";
+ final private static String GROUP3_NAME = "group3";
+ final private static String[] GROUP_NAMES =
+ new String[]{GROUP1_NAME, GROUP2_NAME, GROUP3_NAME};
+
+ /** Test login method */
+ public void testLogin() throws Exception {
+ Configuration conf = new Configuration();
+
+ // login from unix
+ String userName = UnixUserGroupInformation.getUnixUserName();
+ UnixUserGroupInformation curUserGroupInfo =
+ UnixUserGroupInformation.login(conf);
+ assertEquals(curUserGroupInfo.getUserName(), userName);
+ assertTrue(curUserGroupInfo == UnixUserGroupInformation.login(conf));
+
+ // login from the configuration
+ UnixUserGroupInformation userGroupInfo = new UnixUserGroupInformation(
+ USER_NAME, GROUP_NAMES );
+ UnixUserGroupInformation.saveToConf(conf,
+ UnixUserGroupInformation.UGI_PROPERTY_NAME, userGroupInfo);
+ curUserGroupInfo = UnixUserGroupInformation.login(conf);
+ assertEquals(curUserGroupInfo, userGroupInfo);
+ assertTrue(curUserGroupInfo == UnixUserGroupInformation.login(conf));
+ }
+
+ /** test constructor */
+ public void testConstructor() throws Exception {
+ UnixUserGroupInformation uugi =
+ new UnixUserGroupInformation(USER_NAME, GROUP_NAMES);
+ assertEquals(uugi, new UnixUserGroupInformation( new String[]{
+ USER_NAME, GROUP1_NAME, GROUP2_NAME, GROUP3_NAME} ));
+ // failure test
+ testConstructorFailures(null, GROUP_NAMES);
+ testConstructorFailures("", GROUP_NAMES);
+ testConstructorFailures(USER_NAME, null);
+ testConstructorFailures(USER_NAME, new String[0]);
+ testConstructorFailures(USER_NAME, new String[]{null});
+ testConstructorFailures(USER_NAME, new String[]{""});
+ testConstructorFailures(USER_NAME, new String[]{GROUP1_NAME, null});
+ testConstructorFailures(USER_NAME,
+ new String[]{GROUP1_NAME, null, GROUP2_NAME});
+ }
+
+ private void testConstructorFailures(String userName, String[] groupNames) {
+ boolean gotException = false;
+ try {
+ new UnixUserGroupInformation(userName, groupNames);
+ } catch (Exception e) {
+ gotException = true;
+ }
+ assertTrue(gotException);
+ }
+
+ public void testEquals() throws Exception {
+ UnixUserGroupInformation uugi =
+ new UnixUserGroupInformation(USER_NAME, GROUP_NAMES);
+
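+ // Equality treats the first (primary) group as significant but ignores the order of the
+ // remaining groups, as the contrasting asserts below show.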
+ assertEquals(uugi, uugi);
+ assertEquals(uugi, new UnixUserGroupInformation(USER_NAME, GROUP_NAMES));
+ assertEquals(uugi, new UnixUserGroupInformation(USER_NAME,
+ new String[]{GROUP1_NAME, GROUP3_NAME, GROUP2_NAME}));
+ assertFalse(uugi.equals(new UnixUserGroupInformation()));
+ assertFalse(uugi.equals(new UnixUserGroupInformation(USER_NAME,
+ new String[]{GROUP2_NAME, GROUP3_NAME, GROUP1_NAME})));
+ }
+
+ /** test Writable */
+ public void testWritable() throws Exception {
+ UnixUserGroupInformation ugi = new UnixUserGroupInformation(
+ USER_NAME, GROUP_NAMES);
+ TestWritable.testWritable(ugi, new Configuration());
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/security/authorize/TestConfiguredPolicy.java b/src/test/core/org/apache/hadoop/security/authorize/TestConfiguredPolicy.java
new file mode 100644
index 0000000000..203946cabd
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/security/authorize/TestConfiguredPolicy.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.authorize;
+
+import java.security.Permission;
+
+import javax.security.auth.Subject;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.SecurityUtil.AccessControlList;
+
+import junit.framework.TestCase;
+
+public class TestConfiguredPolicy extends TestCase {
+ private static final String USER1 = "drwho";
+ private static final String USER2 = "joe";
+ private static final String[] GROUPS1 = new String[]{"tardis"};
+ private static final String[] GROUPS2 = new String[]{"users"};
+
+ private static final String KEY_1 = "test.policy.1";
+ private static final String KEY_2 = "test.policy.2";
+
+ public static class Protocol1 {
+ int i;
+ }
+ public static class Protocol2 {
+ int j;
+ }
+
+ private static class TestPolicyProvider extends PolicyProvider {
+ @Override
+ public Service[] getServices() {
+ return new Service[] {
+ new Service(KEY_1, Protocol1.class),
+ new Service(KEY_2, Protocol2.class),
+ };
+ }
+ }
+
+ public void testConfiguredPolicy() throws Exception {
+ Configuration conf = new Configuration();
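+ // Protocol1 (KEY_1) is granted to everyone via the wildcard ACL; Protocol2 (KEY_2) only to USER1 and group GROUPS1[0].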
+ conf.set(KEY_1, AccessControlList.WILDCARD_ACL_VALUE);
+ conf.set(KEY_2, USER1 + " " + GROUPS1[0]);
+
+ ConfiguredPolicy policy = new ConfiguredPolicy(conf, new TestPolicyProvider());
+ SecurityUtil.setPolicy(policy);
+
+ Subject user1 =
+ SecurityUtil.getSubject(new UnixUserGroupInformation(USER1, GROUPS1));
+
+ // Should succeed
+ ServiceAuthorizationManager.authorize(user1, Protocol1.class);
+
+ // Should fail
+ Subject user2 =
+ SecurityUtil.getSubject(new UnixUserGroupInformation(USER2, GROUPS2));
+ boolean failed = false;
+ try {
+ ServiceAuthorizationManager.authorize(user2, Protocol2.class);
+ } catch (AuthorizationException ae) {
+ failed = true;
+ }
+ assertTrue(failed);
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/test/CoreTestDriver.java b/src/test/core/org/apache/hadoop/test/CoreTestDriver.java
new file mode 100644
index 0000000000..06590c9cdf
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/test/CoreTestDriver.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.test;
+
+import org.apache.hadoop.io.TestArrayFile;
+import org.apache.hadoop.io.TestSetFile;
+import org.apache.hadoop.ipc.TestIPC;
+import org.apache.hadoop.ipc.TestRPC;
+import org.apache.hadoop.util.ProgramDriver;
+
+/**
+ * Driver for core tests.
+ */
+public class CoreTestDriver {
+
+ private ProgramDriver pgd;
+
+ public CoreTestDriver() {
+ this(new ProgramDriver());
+ }
+
+ public CoreTestDriver(ProgramDriver pgd) {
+ this.pgd = pgd;
+ try {
+ pgd.addClass("testsetfile", TestSetFile.class,
+ "A test for flat files of binary key/value pairs.");
+ pgd.addClass("testarrayfile", TestArrayFile.class,
+ "A test for flat files of binary key/value pairs.");
+ pgd.addClass("testrpc", TestRPC.class, "A test for rpc.");
+ pgd.addClass("testipc", TestIPC.class, "A test for ipc.");
+ } catch(Throwable e) {
+ e.printStackTrace();
+ }
+ }
+
+ public void run(String argv[]) {
+ try {
+ pgd.driver(argv);
+ } catch(Throwable e) {
+ e.printStackTrace();
+ }
+ }
+
+ public static void main(String argv[]){
+ new CoreTestDriver().run(argv);
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/util/TestCyclicIteration.java b/src/test/core/org/apache/hadoop/util/TestCyclicIteration.java
new file mode 100644
index 0000000000..7dfa4763e1
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/util/TestCyclicIteration.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+public class TestCyclicIteration extends junit.framework.TestCase {
+ public void testCyclicIteration() throws Exception {
+ for(int n = 0; n < 5; n++) {
+ checkCyclicIteration(n);
+ }
+ }
+
+ private static void checkCyclicIteration(int numOfElements) {
+ //create a tree map
+ final NavigableMap<Integer, Integer> map = new TreeMap<Integer, Integer>();
+ final Integer[] integers = new Integer[numOfElements];
+ for(int i = 0; i < integers.length; i++) {
+ integers[i] = 2*i;
+ map.put(integers[i], integers[i]);
+ }
+ System.out.println("\n\nintegers=" + Arrays.asList(integers));
+ System.out.println("map=" + map);
+
+ //try starting everywhere
+ for(int start = -1; start <= 2*integers.length - 1; start++) {
+ //get a cyclic iteration
+ final List<Integer> iteration = new ArrayList<Integer>();
+ for(Map.Entry<Integer, Integer> e : new CyclicIteration<Integer, Integer>(map, start)) {
+ iteration.add(e.getKey());
+ }
+ System.out.println("start=" + start + ", iteration=" + iteration);
+
+ //verify results
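+ // Keys are 0, 2, ..., 2(n-1); the expected order starts at the first key greater than
+ // 'start' (index (start+2)/2) and wraps around cyclically.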
+ for(int i = 0; i < integers.length; i++) {
+ final int j = ((start+2)/2 + i)%integers.length;
+ assertEquals("i=" + i + ", j=" + j, iteration.get(i), integers[j]);
+ }
+ }
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/util/TestGenericsUtil.java b/src/test/core/org/apache/hadoop/util/TestGenericsUtil.java
new file mode 100644
index 0000000000..af494c909d
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/util/TestGenericsUtil.java
@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+
+public class TestGenericsUtil extends TestCase {
+
+ public void testToArray() {
+
+ //test a list of size 10
+ List<Integer> list = new ArrayList<Integer>();
+
+ for(int i=0; i<10; i++) {
+ list.add(i);
+ }
+
+ Integer[] arr = GenericsUtil.toArray(list);
+
+ for (int i = 0; i < arr.length; i++) {
+ assertEquals(list.get(i), arr[i]);
+ }
+ }
+
+ public void testWithEmptyList() {
+ try {
+ List<String> list = new ArrayList<String>();
+ String[] arr = GenericsUtil.toArray(list);
+ fail("Empty array should throw exception");
+ System.out.println(arr); //use arr so that compiler will not complain
+
+ }catch (IndexOutOfBoundsException ex) {
+ //test case is successful
+ }
+ }
+
+ public void testWithEmptyList2() {
+ List<String> list = new ArrayList<String>();
+ //this method should not throw IndexOutOfBoundsException
+ String[] arr = GenericsUtil.toArray(String.class, list);
+
+ assertEquals(0, arr.length);
+ }
+
+ /** This class uses generics */
+ private class GenericClass<T> {
+ T dummy;
+ List<T> list = new ArrayList<T>();
+
+ void add(T item) {
+ list.add(item);
+ }
+
+ T[] funcThatUsesToArray() {
+ T[] arr = GenericsUtil.toArray(list);
+ return arr;
+ }
+ }
+
+ public void testWithGenericClass() {
+
+ GenericClass<String> testSubject = new GenericClass<String>();
+
+ testSubject.add("test1");
+ testSubject.add("test2");
+
+ try {
+ //this cast would fail, if we had not used GenericsUtil.toArray, since the
+ //method would return Object[] rather than String[]
+ String[] arr = testSubject.funcThatUsesToArray();
+
+ assertEquals("test1", arr[0]);
+ assertEquals("test2", arr[1]);
+
+ }catch (ClassCastException ex) {
+ fail("GenericsUtil#toArray() is not working for generic classes");
+ }
+
+ }
+
+ public void testGenericOptionsParser() throws Exception {
+ GenericOptionsParser parser = new GenericOptionsParser(
+ new Configuration(), new String[] {"-jt"});
+ assertEquals(parser.getRemainingArgs().length, 0);
+ }
+
+ public void testGetClass() {
+
+ //test with Integer
+ Integer x = new Integer(42);
+ Class<Integer> c = GenericsUtil.getClass(x);
+ assertEquals(Integer.class, c);
+
+ //test with GenericClass
+ GenericClass<Integer> testSubject = new GenericClass<Integer>();
+ Class<GenericClass<Integer>> c2 = GenericsUtil.getClass(testSubject);
+ assertEquals(GenericClass.class, c2);
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/util/TestIndexedSort.java b/src/test/core/org/apache/hadoop/util/TestIndexedSort.java
new file mode 100644
index 0000000000..d806a0adce
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/util/TestIndexedSort.java
@@ -0,0 +1,361 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableComparator;
+
+public class TestIndexedSort extends TestCase {
+
+ public void sortAllEqual(IndexedSorter sorter) throws Exception {
+ final int SAMPLE = 500;
+ int[] values = new int[SAMPLE];
+ Arrays.fill(values, 10);
+ SampleSortable s = new SampleSortable(values);
+ sorter.sort(s, 0, SAMPLE);
+ int[] check = s.getSorted();
+ assertTrue(Arrays.toString(values) + "\ndoesn't match\n" +
+ Arrays.toString(check), Arrays.equals(values, check));
+ // Set random min/max, re-sort.
+ Random r = new Random();
+ int min = r.nextInt(SAMPLE);
+ int max = (min + 1 + r.nextInt(SAMPLE - 2)) % SAMPLE;
+ values[min] = 9;
+ values[max] = 11;
+ System.out.println("testAllEqual setting min/max at " + min + "/" + max +
+ "(" + sorter.getClass().getName() + ")");
+ s = new SampleSortable(values);
+ sorter.sort(s, 0, SAMPLE);
+ check = s.getSorted();
+ Arrays.sort(values);
+ assertTrue(check[0] == 9);
+ assertTrue(check[SAMPLE - 1] == 11);
+ assertTrue(Arrays.toString(values) + "\ndoesn't match\n" +
+ Arrays.toString(check), Arrays.equals(values, check));
+ }
+
+ public void sortSorted(IndexedSorter sorter) throws Exception {
+ final int SAMPLE = 500;
+ int[] values = new int[SAMPLE];
+ Random r = new Random();
+ long seed = r.nextLong();
+ r.setSeed(seed);
+ System.out.println("testSorted seed: " + seed +
+ "(" + sorter.getClass().getName() + ")");
+ for (int i = 0; i < SAMPLE; ++i) {
+ values[i] = r.nextInt(100);
+ }
+ Arrays.sort(values);
+ SampleSortable s = new SampleSortable(values);
+ sorter.sort(s, 0, SAMPLE);
+ int[] check = s.getSorted();
+ assertTrue(Arrays.toString(values) + "\ndoesn't match\n" +
+ Arrays.toString(check), Arrays.equals(values, check));
+ }
+
+ public void sortSequential(IndexedSorter sorter) throws Exception {
+ final int SAMPLE = 500;
+ int[] values = new int[SAMPLE];
+ for (int i = 0; i < SAMPLE; ++i) {
+ values[i] = i;
+ }
+ SampleSortable s = new SampleSortable(values);
+ sorter.sort(s, 0, SAMPLE);
+ int[] check = s.getSorted();
+ assertTrue(Arrays.toString(values) + "\ndoesn't match\n" +
+ Arrays.toString(check), Arrays.equals(values, check));
+ }
+
+ public void sortSingleRecord(IndexedSorter sorter) throws Exception {
+ final int SAMPLE = 1;
+ SampleSortable s = new SampleSortable(SAMPLE);
+ int[] values = s.getValues();
+ sorter.sort(s, 0, SAMPLE);
+ int[] check = s.getSorted();
+ assertTrue(Arrays.toString(values) + "\ndoesn't match\n" +
+ Arrays.toString(check), Arrays.equals(values, check));
+ }
+
+ public void sortRandom(IndexedSorter sorter) throws Exception {
+ final int SAMPLE = 256 * 1024;
+ SampleSortable s = new SampleSortable(SAMPLE);
+ long seed = s.getSeed();
+ System.out.println("sortRandom seed: " + seed +
+ "(" + sorter.getClass().getName() + ")");
+ int[] values = s.getValues();
+ Arrays.sort(values);
+ sorter.sort(s, 0, SAMPLE);
+ int[] check = s.getSorted();
+ assertTrue("seed: " + seed + "\ndoesn't match\n",
+ Arrays.equals(values, check));
+ }
+
+ public void sortWritable(IndexedSorter sorter) throws Exception {
+ final int SAMPLE = 1000;
+ WritableSortable s = new WritableSortable(SAMPLE);
+ long seed = s.getSeed();
+ System.out.println("sortWritable seed: " + seed +
+ "(" + sorter.getClass().getName() + ")");
+ String[] values = s.getValues();
+ Arrays.sort(values);
+ sorter.sort(s, 0, SAMPLE);
+ String[] check = s.getSorted();
+ assertTrue("seed: " + seed + "\ndoesn't match",
+ Arrays.equals(values, check));
+ }
+
+
+ public void testQuickSort() throws Exception {
+ QuickSort sorter = new QuickSort();
+ sortRandom(sorter);
+ sortSingleRecord(sorter);
+ sortSequential(sorter);
+ sortSorted(sorter);
+ sortAllEqual(sorter);
+ sortWritable(sorter);
+
+ // test degenerate case for median-of-three partitioning
+ // a_n, a_1, a_2, ..., a_{n-1}
+ final int DSAMPLE = 500;
+ int[] values = new int[DSAMPLE];
+ for (int i = 0; i < DSAMPLE; ++i) { values[i] = i; }
+ values[0] = values[DSAMPLE - 1] + 1;
+ SampleSortable s = new SampleSortable(values);
+ values = s.getValues();
+ final int DSS = (DSAMPLE / 2) * (DSAMPLE / 2);
+ // Worst case is (N/2)^2 comparisons, not counting those used for the
+ // median-of-three pivot selection; the impl should handle this case
+ MeasuredSortable m = new MeasuredSortable(s, DSS);
+ sorter.sort(m, 0, DSAMPLE);
+ System.out.println("QuickSort degen cmp/swp: " +
+ m.getCmp() + "/" + m.getSwp() +
+ "(" + sorter.getClass().getName() + ")");
+ Arrays.sort(values);
+ int[] check = s.getSorted();
+ assertTrue(Arrays.equals(values, check));
+ }
+
+ public void testHeapSort() throws Exception {
+ HeapSort sorter = new HeapSort();
+ sortRandom(sorter);
+ sortSingleRecord(sorter);
+ sortSequential(sorter);
+ sortSorted(sorter);
+ sortAllEqual(sorter);
+ sortWritable(sorter);
+ }
+
+ // Sortables //
+
+ private static class SampleSortable implements IndexedSortable {
+ private int[] valindex;
+ private int[] valindirect;
+ private int[] values;
+ private final long seed;
+
+ public SampleSortable() {
+ this(50);
+ }
+
+ public SampleSortable(int j) {
+ Random r = new Random();
+ seed = r.nextLong();
+ r.setSeed(seed);
+ values = new int[j];
+ valindex = new int[j];
+ valindirect = new int[j];
+ for (int i = 0; i < j; ++i) {
+ valindex[i] = valindirect[i] = i;
+ values[i] = r.nextInt(1000);
+ }
+ }
+
+ public SampleSortable(int[] values) {
+ this.values = values;
+ valindex = new int[values.length];
+ valindirect = new int[values.length];
+ for (int i = 0; i < values.length; ++i) {
+ valindex[i] = valindirect[i] = i;
+ }
+ seed = 0;
+ }
+
+ public long getSeed() {
+ return seed;
+ }
+
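+ // swap() permutes only valindex; the values array itself is never reordered. getSorted()
+ // reads values back through the permuted indices (valindirect stays the identity).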
+ public int compare(int i, int j) {
+ // assume positive
+ return
+ values[valindirect[valindex[i]]] - values[valindirect[valindex[j]]];
+ }
+
+ public void swap(int i, int j) {
+ int tmp = valindex[i];
+ valindex[i] = valindex[j];
+ valindex[j] = tmp;
+ }
+
+ public int[] getSorted() {
+ int[] ret = new int[values.length];
+ for (int i = 0; i < ret.length; ++i) {
+ ret[i] = values[valindirect[valindex[i]]];
+ }
+ return ret;
+ }
+
+ public int[] getValues() {
+ int[] ret = new int[values.length];
+ System.arraycopy(values, 0, ret, 0, values.length);
+ return ret;
+ }
+
+ }
+
+ public static class MeasuredSortable implements IndexedSortable {
+
+ private int comparisons;
+ private int swaps;
+ private final int maxcmp;
+ private final int maxswp;
+ private IndexedSortable s;
+
+ public MeasuredSortable(IndexedSortable s) {
+ this(s, Integer.MAX_VALUE);
+ }
+
+ public MeasuredSortable(IndexedSortable s, int maxcmp) {
+ this(s, maxcmp, Integer.MAX_VALUE);
+ }
+
+ public MeasuredSortable(IndexedSortable s, int maxcmp, int maxswp) {
+ this.s = s;
+ this.maxcmp = maxcmp;
+ this.maxswp = maxswp;
+ }
+
+ public int getCmp() { return comparisons; }
+ public int getSwp() { return swaps; }
+
+ public int compare(int i, int j) {
+ assertTrue("Expected fewer than " + maxcmp + " comparisons",
+ ++comparisions < maxcmp);
+ return s.compare(i, j);
+ }
+
+ public void swap(int i, int j) {
+ assertTrue("Expected fewer than " + maxswp + " swaps",
+ ++swaps < maxswp);
+ s.swap(i, j);
+ }
+
+ }
+
+ private static class WritableSortable implements IndexedSortable {
+
+ private static Random r = new Random();
+ private final int eob;
+ private final int[] indices;
+ private final int[] offsets;
+ private final byte[] bytes;
+ private final WritableComparator comparator;
+ private final String[] check;
+ private final long seed;
+
+ public WritableSortable() throws IOException {
+ this(100);
+ }
+
+ public WritableSortable(int j) throws IOException {
+ seed = r.nextLong();
+ r.setSeed(seed);
+ Text t = new Text();
+ StringBuffer sb = new StringBuffer();
+ indices = new int[j];
+ offsets = new int[j];
+ check = new String[j];
+ DataOutputBuffer dob = new DataOutputBuffer();
+ for (int i = 0; i < j; ++i) {
+ indices[i] = i;
+ offsets[i] = dob.getLength();
+ genRandom(t, r.nextInt(15) + 1, sb);
+ t.write(dob);
+ check[i] = t.toString();
+ }
+ eob = dob.getLength();
+ bytes = dob.getData();
+ comparator = WritableComparator.get(Text.class);
+ }
+
+ public long getSeed() {
+ return seed;
+ }
+
+ private static void genRandom(Text t, int len, StringBuffer sb) {
+ sb.setLength(0);
+ for (int i = 0; i < len; ++i) {
+ sb.append(Integer.toString(r.nextInt(26) + 10, 36));
+ }
+ t.set(sb.toString());
+ }
+
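+ // Record k occupies bytes[offsets[k] .. offsets[k+1]) (the last record ends at eob);
+ // records are compared directly on their serialized bytes via the Text WritableComparator.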
+ public int compare(int i, int j) {
+ final int ii = indices[i];
+ final int ij = indices[j];
+ return comparator.compare(bytes, offsets[ii],
+ ((ii + 1 == indices.length) ? eob : offsets[ii + 1]) - offsets[ii],
+ bytes, offsets[ij],
+ ((ij + 1 == indices.length) ? eob : offsets[ij + 1]) - offsets[ij]);
+ }
+
+ public void swap(int i, int j) {
+ int tmp = indices[i];
+ indices[i] = indices[j];
+ indices[j] = tmp;
+ }
+
+ public String[] getValues() {
+ return check;
+ }
+
+ public String[] getSorted() throws IOException {
+ String[] ret = new String[indices.length];
+ Text t = new Text();
+ DataInputBuffer dib = new DataInputBuffer();
+ for (int i = 0; i < ret.length; ++i) {
+ int ii = indices[i];
+ dib.reset(bytes, offsets[ii],
+ ((ii + 1 == indices.length) ? eob : offsets[ii + 1]) - offsets[ii]);
+ t.readFields(dib);
+ ret[i] = t.toString();
+ }
+ return ret;
+ }
+
+ }
+
+}
diff --git a/src/test/core/org/apache/hadoop/util/TestProcfsBasedProcessTree.java b/src/test/core/org/apache/hadoop/util/TestProcfsBasedProcessTree.java
new file mode 100644
index 0000000000..0b97507402
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/util/TestProcfsBasedProcessTree.java
@@ -0,0 +1,234 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.util.Random;
+import java.util.Vector;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.Shell.ExitCodeException;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+
+import junit.framework.TestCase;
+
+/**
+ * A JUnit test to test ProcfsBasedProcessTree.
+ */
+public class TestProcfsBasedProcessTree extends TestCase {
+
+ private static final Log LOG = LogFactory
+ .getLog(TestProcfsBasedProcessTree.class);
+ private static String TEST_ROOT_DIR = new Path(System.getProperty(
+ "test.build.data", "/tmp")).toString().replace(' ', '+');
+
+ private ShellCommandExecutor shexec = null;
+ private String pidFile, lowestDescendant;
+ private String shellScript;
+ private static final int N = 6; // Controls the RogueTask
+
+ private class RogueTaskThread extends Thread {
+ public void run() {
+ try {
+ Vector<String> args = new Vector<String>();
+ if(ProcessTree.isSetsidAvailable) {
+ args.add("setsid");
+ }
+ args.add("bash");
+ args.add("-c");
+ args.add(" echo $$ > " + pidFile + "; sh " +
+ shellScript + " " + N + ";") ;
+ shexec = new ShellCommandExecutor(args.toArray(new String[0]));
+ shexec.execute();
+ } catch (ExitCodeException ee) {
+ LOG.info("Shell Command exit with a non-zero exit code. This is" +
+ " expected as we are killing the subprocesses of the" +
+ " task intentionally. " + ee);
+ } catch (IOException ioe) {
+ LOG.info("Error executing shell command " + ioe);
+ } finally {
+ LOG.info("Exit code: " + shexec.getExitCode());
+ }
+ }
+ }
+
+ private String getRogueTaskPID() {
+ File f = new File(pidFile);
+ while (!f.exists()) {
+ try {
+ Thread.sleep(500);
+ } catch (InterruptedException ie) {
+ break;
+ }
+ }
+
+ // read from pidFile
+ return getPidFromPidFile(pidFile);
+ }
+
+ public void testProcessTree() {
+
+ try {
+ if (!ProcfsBasedProcessTree.isAvailable()) {
+ System.out
+ .println("ProcfsBasedProcessTree is not available on this system. Not testing");
+ return;
+ }
+ } catch (Exception e) {
+ LOG.info(StringUtils.stringifyException(e));
+ return;
+ }
+ // create shell script
+ Random rm = new Random();
+ File tempFile = new File(TEST_ROOT_DIR, this.getName() + "_shellScript_" +
+ rm.nextInt() + ".sh");
+ tempFile.deleteOnExit();
+ shellScript = TEST_ROOT_DIR + File.separator + tempFile.getName();
+
+ // create pid file
+ tempFile = new File(TEST_ROOT_DIR, this.getName() + "_pidFile_" +
+ rm.nextInt() + ".pid");
+ tempFile.deleteOnExit();
+ pidFile = TEST_ROOT_DIR + File.separator + tempFile.getName();
+
+ lowestDescendant = TEST_ROOT_DIR + File.separator + "lowestDescendantPidFile";
+
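+ // The script re-invokes itself recursively, decrementing its argument each time; the deepest
+ // descendant writes its pid to 'lowestDescendant' and then loops forever, giving a process tree of known depth.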
+ // write to shell-script
+ try {
+ FileWriter fWriter = new FileWriter(shellScript);
+ fWriter.write(
+ "# rogue task\n" +
+ "sleep 1\n" +
+ "echo hello\n" +
+ "if [ $1 -ne 0 ]\n" +
+ "then\n" +
+ " sh " + shellScript + " $(($1-1))\n" +
+ "else\n" +
+ " echo $$ > " + lowestDescendant + "\n" +
+ " while true\n do\n" +
+ " sleep 5\n" +
+ " done\n" +
+ "fi");
+ fWriter.close();
+ } catch (IOException ioe) {
+ LOG.info("Error: " + ioe);
+ return;
+ }
+
+ Thread t = new RogueTaskThread();
+ t.start();
+ String pid = getRogueTaskPID();
+ LOG.info("Root process pid: " + pid);
+ ProcfsBasedProcessTree p = new ProcfsBasedProcessTree(pid,
+ ProcessTree.isSetsidAvailable,
+ ProcessTree.DEFAULT_SLEEPTIME_BEFORE_SIGKILL);
+ p = p.getProcessTree(); // initialize
+ LOG.info("ProcessTree: " + p.toString());
+
+ File leaf = new File(lowestDescendant);
+ // wait till the lowest descendant process of the rogue task starts execution
+ while (!leaf.exists()) {
+ try {
+ Thread.sleep(500);
+ } catch (InterruptedException ie) {
+ break;
+ }
+ }
+
+ p = p.getProcessTree(); // reconstruct
+ LOG.info("ProcessTree: " + p.toString());
+
+ // destroy the rogue task and all its subprocesses
+ p.destroy(true/*in the background*/);
+
+ if (ProcessTree.isSetsidAvailable) { // the whole process tree should be gone
+ assertFalse("Some process in the process tree is still alive",
+ p.isAnyProcessInTreeAlive());
+ } else { // at least the root process should be gone
+ assertFalse("Root process of the process tree is still alive", p.isAlive());
+ }
+ // The thread may fail to join in time when forking with a large N.
+ try {
+ t.join(2000);
+ LOG.info("RogueTaskThread successfully joined.");
+ } catch (InterruptedException ie) {
+ LOG.info("Interrupted while joining RogueTaskThread.");
+ }
+
+ // The process tree is gone now. Any further calls should still be safe.
+ p = p.getProcessTree();
+ assertFalse("ProcessTree should report the root process as gone", p.isAlive());
+ assertEquals("Cumulative vmem for the gone process is "
+ + p.getCumulativeVmem() + "; it should be zero.",
+ 0, p.getCumulativeVmem());
+ assertEquals("[ ]", p.toString());
+ }
+
+ /**
+ * Get PID from a pid-file.
+ *
+ * @param pidFileName
+ * Name of the pid-file.
+ * @return the PID string read from the pid-file, or null if the
+ * pidFileName points to a non-existent file or if the read
+ * from the file fails.
+ */
+ public static String getPidFromPidFile(String pidFileName) {
+ BufferedReader pidFile = null;
+ FileReader fReader = null;
+ String pid = null;
+
+ try {
+ fReader = new FileReader(pidFileName);
+ pidFile = new BufferedReader(fReader);
+ } catch (FileNotFoundException f) {
+ LOG.debug("PidFile doesn't exist : " + pidFileName);
+ return pid;
+ }
+
+ try {
+ pid = pidFile.readLine();
+ } catch (IOException i) {
+ LOG.error("Failed to read from " + pidFileName);
+ } finally {
+ // Closing the BufferedReader also closes the wrapped FileReader.
+ try {
+ if (pidFile != null) {
+ pidFile.close();
+ } else if (fReader != null) {
+ // pidFile was never constructed; close the FileReader directly
+ fReader.close();
+ }
+ } catch (IOException i) {
+ LOG.warn("Error closing the pid-file stream for " + pidFileName);
+ }
+ }
+ return pid;
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/util/TestShell.java b/src/test/core/org/apache/hadoop/util/TestShell.java
new file mode 100644
index 0000000000..ca7303187b
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/util/TestShell.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import junit.framework.TestCase;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+
+public class TestShell extends TestCase {
+
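+ /** Trivial Shell subclass that counts how many times the underlying command is actually run. */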
+ private static class Command extends Shell {
+ private int runCount = 0;
+
+ private Command(long interval) {
+ super(interval);
+ }
+
+ protected String[] getExecString() {
+ return new String[] {"echo", "hello"};
+ }
+
+ protected void parseExecResult(BufferedReader lines) throws IOException {
+ ++runCount;
+ }
+
+ public int getRunCount() {
+ return runCount;
+ }
+ }
+
+ public void testInterval() throws IOException {
+ testInterval(Long.MIN_VALUE / 60000); // test a negative interval
+ testInterval(0L); // test a zero interval
+ testInterval(10L); // test a small positive interval
+ testInterval(System.currentTimeMillis() / 60000 + 60); // test a very big interval
+ }
+
+ /**
+ * Assert that a string has a substring in it
+ * @param string string to search
+ * @param search what to search for
+ */
+ private void assertInString(String string, String search) {
+ assertNotNull("Empty String", string);
+ if (!string.contains(search)) {
+ fail("Did not find \"" + search + "\" in " + string);
+ }
+ }
+
+ public void testShellCommandExecutorToString() throws Throwable {
+ Shell.ShellCommandExecutor sce = new Shell.ShellCommandExecutor(
+ new String[] { "ls", "..", "arg 2" });
+ String command = sce.toString();
+ assertInString(command, "ls");
+ assertInString(command, " .. ");
+ assertInString(command, "\"arg 2\"");
+ }
+
+ private void testInterval(long interval) throws IOException {
+ Command command = new Command(interval);
+
+ command.run();
+ assertEquals(1, command.getRunCount());
+
+ command.run();
+ if (interval > 0) {
+ assertEquals(1, command.getRunCount());
+ } else {
+ assertEquals(2, command.getRunCount());
+ }
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/util/TestStringUtils.java b/src/test/core/org/apache/hadoop/util/TestStringUtils.java
new file mode 100644
index 0000000000..e68609ae2f
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/util/TestStringUtils.java
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import junit.framework.TestCase;
+
+public class TestStringUtils extends TestCase {
+ private static final String NULL_STR = null;
+ private static final String EMPTY_STR = "";
+ private static final String STR_WO_SPECIAL_CHARS = "AB";
+ private static final String STR_WITH_COMMA = "A,B";
+ private static final String ESCAPED_STR_WITH_COMMA = "A\\,B";
+ private static final String STR_WITH_ESCAPE = "AB\\";
+ private static final String ESCAPED_STR_WITH_ESCAPE = "AB\\\\";
+ private static final String STR_WITH_BOTH2 = ",A\\,,B\\\\,";
+ private static final String ESCAPED_STR_WITH_BOTH2 =
+ "\\,A\\\\\\,\\,B\\\\\\\\\\,";
+
+ public void testEscapeString() throws Exception {
+ assertEquals(NULL_STR, StringUtils.escapeString(NULL_STR));
+ assertEquals(EMPTY_STR, StringUtils.escapeString(EMPTY_STR));
+ assertEquals(STR_WO_SPECIAL_CHARS,
+ StringUtils.escapeString(STR_WO_SPECIAL_CHARS));
+ assertEquals(ESCAPED_STR_WITH_COMMA,
+ StringUtils.escapeString(STR_WITH_COMMA));
+ assertEquals(ESCAPED_STR_WITH_ESCAPE,
+ StringUtils.escapeString(STR_WITH_ESCAPE));
+ assertEquals(ESCAPED_STR_WITH_BOTH2,
+ StringUtils.escapeString(STR_WITH_BOTH2));
+ }
+
+ public void testSplit() throws Exception {
+ assertEquals(NULL_STR, StringUtils.split(NULL_STR));
+ String[] splits = StringUtils.split(EMPTY_STR);
+ assertEquals(0, splits.length);
+ splits = StringUtils.split(",,");
+ assertEquals(0, splits.length);
+ splits = StringUtils.split(STR_WO_SPECIAL_CHARS);
+ assertEquals(1, splits.length);
+ assertEquals(STR_WO_SPECIAL_CHARS, splits[0]);
+ splits = StringUtils.split(STR_WITH_COMMA);
+ assertEquals(2, splits.length);
+ assertEquals("A", splits[0]);
+ assertEquals("B", splits[1]);
+ splits = StringUtils.split(ESCAPED_STR_WITH_COMMA);
+ assertEquals(1, splits.length);
+ assertEquals(ESCAPED_STR_WITH_COMMA, splits[0]);
+ splits = StringUtils.split(STR_WITH_ESCAPE);
+ assertEquals(1, splits.length);
+ assertEquals(STR_WITH_ESCAPE, splits[0]);
+ splits = StringUtils.split(STR_WITH_BOTH2);
+ assertEquals(3, splits.length);
+ assertEquals(EMPTY_STR, splits[0]);
+ assertEquals("A\\,", splits[1]);
+ assertEquals("B\\\\", splits[2]);
+ splits = StringUtils.split(ESCAPED_STR_WITH_BOTH2);
+ assertEquals(1, splits.length);
+ assertEquals(ESCAPED_STR_WITH_BOTH2, splits[0]);
+ }
+
+ public void testUnescapeString() throws Exception {
+ assertEquals(NULL_STR, StringUtils.unEscapeString(NULL_STR));
+ assertEquals(EMPTY_STR, StringUtils.unEscapeString(EMPTY_STR));
+ assertEquals(STR_WO_SPECIAL_CHARS,
+ StringUtils.unEscapeString(STR_WO_SPECIAL_CHARS));
+ try {
+ StringUtils.unEscapeString(STR_WITH_COMMA);
+ fail("Should throw IllegalArgumentException");
+ } catch (IllegalArgumentException e) {
+ // expected
+ }
+ assertEquals(STR_WITH_COMMA,
+ StringUtils.unEscapeString(ESCAPED_STR_WITH_COMMA));
+ try {
+ StringUtils.unEscapeString(STR_WITH_ESCAPE);
+ fail("Should throw IllegalArgumentException");
+ } catch (IllegalArgumentException e) {
+ // expected
+ }
+ assertEquals(STR_WITH_ESCAPE,
+ StringUtils.unEscapeString(ESCAPED_STR_WITH_ESCAPE));
+ try {
+ StringUtils.unEscapeString(STR_WITH_BOTH2);
+ fail("Should throw IllegalArgumentException");
+ } catch (IllegalArgumentException e) {
+ // expected
+ }
+ assertEquals(STR_WITH_BOTH2,
+ StringUtils.unEscapeString(ESCAPED_STR_WITH_BOTH2));
+ }
+
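+ /** 1k = 1024, and each successive prefix (m, g, t, p, e) multiplies by another 1024. */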
+ public void testTraditionalBinaryPrefix() throws Exception {
+ String[] symbol = {"k", "m", "g", "t", "p", "e"};
+ long m = 1024;
+ for(String s : symbol) {
+ assertEquals(0, StringUtils.TraditionalBinaryPrefix.string2long(0 + s));
+ assertEquals(m, StringUtils.TraditionalBinaryPrefix.string2long(1 + s));
+ m *= 1024;
+ }
+
+ assertEquals(0L, StringUtils.TraditionalBinaryPrefix.string2long("0"));
+ assertEquals(-1259520L, StringUtils.TraditionalBinaryPrefix.string2long("-1230k"));
+ assertEquals(956703965184L, StringUtils.TraditionalBinaryPrefix.string2long("891g"));
+ }
+}
diff --git a/src/test/lib/ftplet-api-1.0.0-SNAPSHOT.jar b/src/test/lib/ftplet-api-1.0.0-SNAPSHOT.jar
deleted file mode 100644
index bc806e4014..0000000000
Binary files a/src/test/lib/ftplet-api-1.0.0-SNAPSHOT.jar and /dev/null differ
diff --git a/src/test/lib/ftpserver-core-1.0.0-SNAPSHOT.jar b/src/test/lib/ftpserver-core-1.0.0-SNAPSHOT.jar
deleted file mode 100644
index d469c27277..0000000000
Binary files a/src/test/lib/ftpserver-core-1.0.0-SNAPSHOT.jar and /dev/null differ
diff --git a/src/test/lib/ftpserver-server-1.0.0-SNAPSHOT.jar b/src/test/lib/ftpserver-server-1.0.0-SNAPSHOT.jar
deleted file mode 100644
index d23e583208..0000000000
Binary files a/src/test/lib/ftpserver-server-1.0.0-SNAPSHOT.jar and /dev/null differ
diff --git a/src/test/lib/mina-core-2.0.0-M2-20080407.124109-12.jar b/src/test/lib/mina-core-2.0.0-M2-20080407.124109-12.jar
deleted file mode 100644
index 89c81327a6..0000000000
Binary files a/src/test/lib/mina-core-2.0.0-M2-20080407.124109-12.jar and /dev/null differ