HDFS-8546. Use try with resources in DataStorage and Storage.

Andrew Wang 2015-06-25 17:50:32 -07:00
parent ff0e5e572f
commit 1403b84b12
3 changed files with 25 additions and 46 deletions

CHANGES.txt (hadoop-hdfs)

@@ -674,6 +674,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8665. Fix replication check in DFSTestUtils#waitForReplication. (wang)
 
+    HDFS-8546. Use try with resources in DataStorage and Storage. (wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

Storage.java (org.apache.hadoop.hdfs.server.common)

@@ -709,6 +709,7 @@ FileLock tryLock() throws IOException {
     try {
       res = file.getChannel().tryLock();
       if (null == res) {
+        LOG.error("Unable to acquire file lock on path " + lockF.toString());
         throw new OverlappingFileLockException();
       }
       file.write(jvmName.getBytes(Charsets.UTF_8));
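Not part of the commit: a minimal standalone sketch of the locking pattern touched above. FileChannel.tryLock() returns null when an overlapping lock is held by another process and throws OverlappingFileLockException when the overlap is inside the same JVM, so the added LOG.error surfaces the first case before it is normalized to the same exception. The class and error reporting below are illustrative only.

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;

class TryLockSketch {
  // Non-blocking lock attempt on the file backing 'raf'.
  static FileLock lockOrFail(RandomAccessFile raf, String path) throws IOException {
    FileLock lock = raf.getChannel().tryLock();  // null: another process holds the lock
    if (lock == null) {
      System.err.println("Unable to acquire file lock on path " + path);
      throw new OverlappingFileLockException();  // mirror the in-JVM failure mode
    }
    return lock;
  }
}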
@@ -972,35 +973,28 @@ public void writeProperties(StorageDirectory sd) throws IOException {
   public void writeProperties(File to, StorageDirectory sd) throws IOException {
     Properties props = new Properties();
     setPropertiesFromFields(props, sd);
-    writeProperties(to, sd, props);
+    writeProperties(to, props);
   }
 
-  public static void writeProperties(File to, StorageDirectory sd,
-      Properties props) throws IOException {
-    RandomAccessFile file = new RandomAccessFile(to, "rws");
-    FileOutputStream out = null;
-    try {
+  public static void writeProperties(File to, Properties props)
+      throws IOException {
+    try (RandomAccessFile file = new RandomAccessFile(to, "rws");
+        FileOutputStream out = new FileOutputStream(file.getFD())) {
       file.seek(0);
-      out = new FileOutputStream(file.getFD());
       /*
        * If server is interrupted before this line,
        * the version file will remain unchanged.
        */
       props.store(out, null);
       /*
        * Now the new fields are flushed to the head of the file, but file
        * length can still be larger then required and therefore the file can
       * contain whole or corrupted fields from its old contents in the end.
       * If server is interrupted here and restarted later these extra fields
       * either should not effect server behavior or should be handled
       * by the server correctly.
       */
       file.setLength(out.getChannel().position());
-    } finally {
-      if (out != null) {
-        out.close();
-      }
-      file.close();
     }
   }
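The rewritten writeProperties leans on try-with-resources closing resources in reverse declaration order: the FileOutputStream wrapping the RandomAccessFile's descriptor is closed first, then the RandomAccessFile itself, the same ordering the old finally block enforced by hand. A tiny standalone sketch (hypothetical Resource class, not Hadoop code) demonstrating that close order:

class CloseOrderSketch {
  static class Resource implements AutoCloseable {
    private final String name;
    Resource(String name) { this.name = name; }
    @Override public void close() { System.out.println("closing " + name); }
  }

  public static void main(String[] args) {
    try (Resource first = new Resource("first");
        Resource second = new Resource("second")) {
      System.out.println("in body");
    }
    // Prints "in body", then "closing second", then "closing first".
  }
}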

DataStorage.java (org.apache.hadoop.hdfs.server.datanode)

@@ -44,17 +44,15 @@
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DiskChecker;
 
 import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.nio.channels.FileLock;
+import java.nio.channels.OverlappingFileLockException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -82,7 +80,6 @@
 public class DataStorage extends Storage {
 
   public final static String BLOCK_SUBDIR_PREFIX = "subdir";
-  final static String COPY_FILE_PREFIX = "dncp_";
   final static String STORAGE_DIR_DETACHED = "detach";
   public final static String STORAGE_DIR_RBW = "rbw";
   public final static String STORAGE_DIR_FINALIZED = "finalized";
@ -614,20 +611,22 @@ private void setFieldsFromProperties(Properties props, StorageDirectory sd,
@Override @Override
public boolean isPreUpgradableLayout(StorageDirectory sd) throws IOException { public boolean isPreUpgradableLayout(StorageDirectory sd) throws IOException {
File oldF = new File(sd.getRoot(), "storage"); File oldF = new File(sd.getRoot(), "storage");
if (!oldF.exists()) if (!oldF.exists()) {
return false; return false;
}
// check the layout version inside the storage file // check the layout version inside the storage file
// Lock and Read old storage file // Lock and Read old storage file
RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws"); try (RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
FileLock oldLock = oldFile.getChannel().tryLock(); FileLock oldLock = oldFile.getChannel().tryLock()) {
try { if (null == oldLock) {
LOG.error("Unable to acquire file lock on path " + oldF.toString());
throw new OverlappingFileLockException();
}
oldFile.seek(0); oldFile.seek(0);
int oldVersion = oldFile.readInt(); int oldVersion = oldFile.readInt();
if (oldVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION) if (oldVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION) {
return false; return false;
} finally { }
oldLock.release();
oldFile.close();
} }
return true; return true;
} }
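This rewrite works because FileLock implements AutoCloseable since Java 7 (close() releases the lock), and because try-with-resources skips closing a resource variable that initialized to null, which is why the explicit null check for a lost tryLock() race remains necessary. A standalone sketch of the same pattern (illustrative names, not Hadoop code):

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;

class LockedReadSketch {
  // Lock a storage file, read the int at offset 0, then release the lock
  // and close the file in reverse order of acquisition.
  static int readVersion(File storageFile) throws IOException {
    try (RandomAccessFile raf = new RandomAccessFile(storageFile, "rws");
        FileLock lock = raf.getChannel().tryLock()) {
      if (lock == null) {
        throw new OverlappingFileLockException();  // lost the race to another process
      }
      raf.seek(0);
      return raf.readInt();
    }  // lock released, then file closed, even on exception
  }
}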
@@ -1218,23 +1217,8 @@ static void linkBlocksHelper(File from, File to, int oldLV, HardLink hl,
       return;
     }
     if (!from.isDirectory()) {
-      if (from.getName().startsWith(COPY_FILE_PREFIX)) {
-        FileInputStream in = new FileInputStream(from);
-        try {
-          FileOutputStream out = new FileOutputStream(to);
-          try {
-            IOUtils.copyBytes(in, out, 16*1024);
-            hl.linkStats.countPhysicalFileCopies++;
-          } finally {
-            out.close();
-          }
-        } finally {
-          in.close();
-        }
-      } else {
-        HardLink.createHardLink(from, to);
-        hl.linkStats.countSingleLinks++;
-      }
+      HardLink.createHardLink(from, to);
+      hl.linkStats.countSingleLinks++;
       return;
     }
     // from is a directory
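With the dncp_ copy branch removed, every regular file reaching this point is hard-linked into the upgraded layout. Purely as an illustration (an assumption about the underlying operation, not Hadoop's HardLink implementation), the JDK equivalent of that single-link step is Files.createLink:

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

class HardLinkSketch {
  // Create 'to' as a second directory entry for the same data as 'from'.
  static void link(File from, File to) throws IOException {
    Files.createLink(to.toPath(), from.toPath());
  }
}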
@@ -1285,8 +1269,7 @@ public boolean accept(File dir, String name) {
     String[] otherNames = from.list(new java.io.FilenameFilter() {
       @Override
       public boolean accept(File dir, String name) {
-        return name.startsWith(BLOCK_SUBDIR_PREFIX)
-          || name.startsWith(COPY_FILE_PREFIX);
+        return name.startsWith(BLOCK_SUBDIR_PREFIX);
       }
     });
     for(int i = 0; i < otherNames.length; i++)
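Since COPY_FILE_PREFIX is gone, the filter now matches only "subdir"-prefixed entries. As a side note (not part of this commit), FilenameFilter is a functional interface, so on Java 8+ the same filter could be written as a lambda:

import java.io.File;

class SubdirFilterSketch {
  // Hypothetical constant mirroring DataStorage.BLOCK_SUBDIR_PREFIX.
  static final String BLOCK_SUBDIR_PREFIX = "subdir";

  static String[] listSubdirs(File from) {
    // File.list(FilenameFilter) accepts a lambda because FilenameFilter
    // declares a single abstract method, accept(File, String).
    return from.list((dir, name) -> name.startsWith(BLOCK_SUBDIR_PREFIX));
  }
}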