HDFS-3100. In BlockSender, throw an exception when it needs to verify checksum but the meta data does not exist. Contributed by Brandon Li

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1303628 13f79535-47bb-0310-9956-ffa450edef68
Author: Tsz-wo Sze
Date:   2012-03-22 00:10:20 +00:00
parent 2f820dde17
commit 0c4acdc176
5 changed files with 54 additions and 4 deletions


@@ -330,6 +330,9 @@ Release 0.23.3 - UNRELEASED
    HDFS-3083. Cannot run an MR job with HA and security enabled when
    second-listed NN active. (atm)

    HDFS-3100. In BlockSender, throw an exception when it needs to verify
    checksum but the meta data does not exist. (Brandon Li via szetszwo)

  BREAKDOWN OF HDFS-1623 SUBTASKS

    HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)


@@ -22,6 +22,7 @@
import java.io.DataOutputStream;
import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
@@ -218,9 +219,21 @@ class BlockSender implements java.io.Closeable {
      this.transferToAllowed = datanode.getDnConf().transferToAllowed &&
        (!is32Bit || length <= Integer.MAX_VALUE);

      /*
       * (corruptChecksumOK, meta_file_exist): operation
       * True,  True: will verify checksum
       * True,  False: No verify, e.g., need to read data from a corrupted file
       * False, True: will verify checksum
       * False, False: throws IOException file not found
       */
      DataChecksum csum;
      final InputStream metaIn = datanode.data.getMetaDataInputStream(block);
      if (!corruptChecksumOk || metaIn != null) {
        if (metaIn == null) {
          //need checksum but meta-data not found
          throw new FileNotFoundException("Meta-data not found for " + block);
        }
        checksumIn = new DataInputStream(
            new BufferedInputStream(metaIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
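
A minimal standalone sketch of the decision table in the comment above; the class and method names here are made up for illustration and are not part of this patch:

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;

class ChecksumPolicySketch {
  /**
   * Mirrors the (corruptChecksumOk, meta file exists) table: returns the meta
   * stream when the checksum will be verified, returns null when a corrupt
   * checksum is tolerated and no meta file exists, and throws
   * FileNotFoundException when verification is required but the meta file is
   * missing.
   */
  static InputStream chooseChecksumStream(boolean corruptChecksumOk,
      InputStream metaIn, String block) throws IOException {
    if (!corruptChecksumOk || metaIn != null) {
      if (metaIn == null) {
        // (false, false): need checksum but meta-data not found
        throw new FileNotFoundException("Meta-data not found for " + block);
      }
      return metaIn;  // (true, true) or (false, true): will verify checksum
    }
    return null;      // (true, false): read without verifying the checksum
  }
}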


@@ -18,17 +18,21 @@
package org.apache.hadoop.hdfs;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Random;
import junit.framework.TestCase;
import junit.framework.Assert;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
/** Utilities for append-related tests */
@@ -176,4 +180,23 @@ private static void checkData(final byte[] actual, int from,
      actual[idx] = 0;
    }
  }

  public static void testAppend(FileSystem fs, Path p) throws IOException {
    final byte[] bytes = new byte[1000];

    { //create file
      final FSDataOutputStream out = fs.create(p, (short)1);
      out.write(bytes);
      out.close();
      Assert.assertEquals(bytes.length, fs.getFileStatus(p).getLen());
    }

    for(int i = 2; i < 500; i++) {
      //append
      final FSDataOutputStream out = fs.append(p);
      out.write(bytes);
      out.close();
      Assert.assertEquals(i*bytes.length, fs.getFileStatus(p).getLen());
    }
  }
}
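
A minimal sketch, not part of this commit, of how the new utility might be driven against a MiniDFSCluster; the test class below is hypothetical, only AppendTestUtil.testAppend comes from the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class TestAppendSketch {
  public void testAppendOnMiniCluster() throws Exception {
    final Configuration conf = new Configuration();
    final MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      final FileSystem fs = cluster.getFileSystem();
      // creates /testAppend/f, then appends 1000 bytes 498 more times,
      // verifying the reported file length after every close
      AppendTestUtil.testAppend(fs, new Path("/testAppend/f"));
    } finally {
      cluster.shutdown();
    }
  }
}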


@@ -18,8 +18,11 @@
package org.apache.hadoop.hdfs;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
public class TestHDFSFileSystemContract extends FileSystemContractBaseTest {
@@ -46,5 +49,8 @@ protected void tearDown() throws Exception {
  protected String getDefaultWorkingDirectory() {
    return defaultWorkingDirectory;
  }

  public void testAppend() throws IOException {
    AppendTestUtil.testAppend(fs, new Path("/testAppend/f"));
  }
}


@@ -37,6 +37,7 @@
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.web.resources.DoAsParam;
@@ -333,5 +334,9 @@ public void testResponseCode() throws IOException {
      assertEquals((short)0755, webhdfs.getFileStatus(dir).getPermission().toShort());
      conn.disconnect();
    }

    {//test append.
      AppendTestUtil.testAppend(fs, new Path(dir, "append"));
    }
  }
}
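
For comparison, a hedged sketch of pointing the same utility at a WebHDFS FileSystem; the cluster and URI wiring below is an assumption modelled on the surrounding test, not code from this patch:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class WebHdfsAppendSketch {
  public void run() throws Exception {
    final Configuration conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
    final MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      // MiniDFSCluster rewrites dfs.namenode.http-address to the bound port
      final String uri = "webhdfs://"
          + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
      final FileSystem webhdfs = FileSystem.get(new URI(uri), conf);
      AppendTestUtil.testAppend(webhdfs, new Path("/test/append"));
    } finally {
      cluster.shutdown();
    }
  }
}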