HDFS-4996. ClientProtocol#metaSave can be made idempotent by overwriting the output file instead of appending to it. Contributed by Chris Nauroth.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1504679 13f79535-47bb-0310-9956-ffa450edef68
Chris Nauroth 2013-07-18 21:57:24 +00:00
parent cc536fe4da
commit e2efe52ac8
5 changed files with 61 additions and 13 deletions

View File

@@ -381,6 +381,7 @@ Administration Commands
 *-----------------+-----------------------------------------------------------+
 | -metasave filename | Save Namenode's primary data structures to <filename> in
 | the directory specified by hadoop.log.dir property.
+| <filename> is overwritten if it exists.
 | <filename> will contain one line for each of the following\
 | 1. Datanodes heart beating with Namenode\
 | 2. Blocks waiting to be replicated\
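For context, the command documented above is reachable from the shell as "hdfs dfsadmin -metasave <filename>", or programmatically through the same Tool entry point. A minimal sketch, assuming a client configuration that points at the target NameNode (the class name and output file name here are illustrative, not part of the patch):

import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

public class MetaSaveInvoker {
  public static void main(String[] args) throws Exception {
    // Equivalent to: hdfs dfsadmin -metasave metasave.out.txt
    // The report lands in <hadoop.log.dir>/metasave.out.txt on the NameNode;
    // with this patch applied, an existing report of the same name is
    // overwritten rather than appended to.
    int exitCode = ToolRunner.run(new DFSAdmin(),
        new String[] { "-metasave", "metasave.out.txt" });
    System.exit(exitCode);
  }
}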

View File

@@ -449,6 +449,9 @@ Release 2.1.0-beta - 2013-07-02
     HDFS-4992. Make balancer's mover thread count and dispatcher thread count
     configurable. (Max Lapan via szetszwo)
 
+    HDFS-4996. ClientProtocol#metaSave can be made idempotent by overwriting the
+    output file instead of appending to it. (cnauroth)
+
   OPTIMIZATIONS
 
     HDFS-4465. Optimize datanode ReplicasMap and ReplicaInfo. (atm)

View File

@@ -1197,7 +1197,7 @@ void metaSave(String filename) throws IOException {
     checkOperation(OperationCategory.UNCHECKED);
     File file = new File(System.getProperty("hadoop.log.dir"), filename);
     PrintWriter out = new PrintWriter(new BufferedWriter(
-        new OutputStreamWriter(new FileOutputStream(file, true), Charsets.UTF_8)));
+        new OutputStreamWriter(new FileOutputStream(file), Charsets.UTF_8)));
     metaSave(out);
     out.flush();
     out.close();
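The one-line fix above is where the overwrite behavior comes from: java.io.FileOutputStream's two-argument constructor opens the file in append mode when the boolean is true, while the one-argument form truncates any existing file. A standalone sketch of the difference (the file name is hypothetical, not from the patch):

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintWriter;

public class OverwriteVsAppend {
  private static void write(File file, boolean append) throws IOException {
    // Simplified from the writer construction in FSNamesystem#metaSave.
    PrintWriter out = new PrintWriter(new FileOutputStream(file, append));
    out.println("Live Datanodes: 2");
    out.close();
  }

  public static void main(String[] args) throws IOException {
    File file = new File("demo.out.txt");

    write(file, true);   // old behavior: append mode
    write(file, true);
    System.out.println("append x2:    " + file.length() + " bytes"); // grows

    write(file, false);  // new behavior: truncate, so calls are idempotent
    write(file, false);
    System.out.println("overwrite x2: " + file.length() + " bytes"); // constant
  }
}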

View File

@@ -628,6 +628,7 @@ private void printHelp(String cmd) {
     String metaSave = "-metasave <filename>: \tSave Namenode's primary data structures\n" +
       "\t\tto <filename> in the directory specified by hadoop.log.dir property.\n" +
+      "\t\t<filename> is overwritten if it exists.\n" +
       "\t\t<filename> will contain one line for each of the following\n" +
       "\t\t\t1. Datanodes heart beating with Namenode\n" +
       "\t\t\t2. Blocks waiting to be replicated\n" +

View File

@@ -18,9 +18,11 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.BufferedReader;
 import java.io.DataInputStream;
+import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
@@ -28,6 +30,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -45,6 +48,7 @@ public class TestMetaSave {
   static final int blockSize = 8192;
   private static MiniDFSCluster cluster = null;
   private static FileSystem fileSys = null;
+  private static FSNamesystem namesystem = null;
 
   @BeforeClass
   public static void setUp() throws IOException {
@@ -59,6 +63,7 @@ public static void setUp() throws IOException {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
     cluster.waitActive();
     fileSys = cluster.getFileSystem();
+    namesystem = cluster.getNamesystem();
   }
 
   /**
@@ -66,9 +71,6 @@ public static void setUp() throws IOException {
    */
   @Test
   public void testMetaSave() throws IOException, InterruptedException {
-    final FSNamesystem namesystem = cluster.getNamesystem();
-
     for (int i = 0; i < 2; i++) {
       Path file = new Path("/filestatus" + i);
       DFSTestUtil.createFile(fileSys, file, 1024, 1024, blockSize, (short) 2,
@@ -83,9 +85,8 @@ public void testMetaSave() throws IOException, InterruptedException {
     namesystem.metaSave("metasave.out.txt");
 
     // Verification
-    String logFile = System.getProperty("hadoop.log.dir") + "/"
-        + "metasave.out.txt";
-    FileInputStream fstream = new FileInputStream(logFile);
+    FileInputStream fstream = new FileInputStream(getLogFile(
+        "metasave.out.txt"));
     DataInputStream in = new DataInputStream(fstream);
     BufferedReader reader = null;
     try {
@@ -112,9 +113,6 @@ public void testMetaSave() throws IOException, InterruptedException {
   @Test
   public void testMetasaveAfterDelete()
       throws IOException, InterruptedException {
-    final FSNamesystem namesystem = cluster.getNamesystem();
-
     for (int i = 0; i < 2; i++) {
       Path file = new Path("/filestatus" + i);
       DFSTestUtil.createFile(fileSys, file, 1024, 1024, blockSize, (short) 2,
@@ -131,11 +129,10 @@ public void testMetasaveAfterDelete()
     namesystem.metaSave("metasaveAfterDelete.out.txt");
 
     // Verification
-    String logFile = System.getProperty("hadoop.log.dir") + "/"
-        + "metasaveAfterDelete.out.txt";
     BufferedReader reader = null;
     try {
-      FileInputStream fstream = new FileInputStream(logFile);
+      FileInputStream fstream = new FileInputStream(getLogFile(
+          "metasaveAfterDelete.out.txt"));
       DataInputStream in = new DataInputStream(fstream);
       reader = new BufferedReader(new InputStreamReader(in));
       reader.readLine();
@@ -155,6 +152,42 @@ public void testMetasaveAfterDelete()
     }
   }
 
+  /**
+   * Tests that metasave overwrites the output file (not append).
+   */
+  @Test
+  public void testMetaSaveOverwrite() throws Exception {
+    // metaSave twice.
+    namesystem.metaSave("metaSaveOverwrite.out.txt");
+    namesystem.metaSave("metaSaveOverwrite.out.txt");
+
+    // Read output file.
+    FileInputStream fis = null;
+    InputStreamReader isr = null;
+    BufferedReader rdr = null;
+    try {
+      fis = new FileInputStream(getLogFile("metaSaveOverwrite.out.txt"));
+      isr = new InputStreamReader(fis);
+      rdr = new BufferedReader(isr);
+
+      // Validate that file was overwritten (not appended) by checking for
+      // presence of only one "Live Datanodes" line.
+      boolean foundLiveDatanodesLine = false;
+      String line = rdr.readLine();
+      while (line != null) {
+        if (line.startsWith("Live Datanodes")) {
+          if (foundLiveDatanodesLine) {
+            fail("multiple Live Datanodes lines, output file not overwritten");
+          }
+          foundLiveDatanodesLine = true;
+        }
+        line = rdr.readLine();
+      }
+    } finally {
+      IOUtils.cleanup(null, rdr, isr, fis);
+    }
+  }
+
   @AfterClass
   public static void tearDown() throws IOException {
     if (fileSys != null)
@@ -162,4 +195,14 @@ public static void tearDown() throws IOException {
     if (cluster != null)
       cluster.shutdown();
   }
+
+  /**
+   * Returns a File for the given name inside the log directory.
+   *
+   * @param name String file name
+   * @return File for given name inside log directory
+   */
+  private static File getLogFile(String name) {
+    return new File(System.getProperty("hadoop.log.dir"), name);
+  }
 }
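Why the change matters beyond tidiness: an RPC client may retry metaSave after a timeout or a NameNode failover, and with append semantics each retry would duplicate the report. Once the call is a pure overwrite, repeated invocations converge to the same on-disk state, which is what allows the protocol method to be marked idempotent for Hadoop's retry framework. A hedged sketch of what that marking looks like (the org.apache.hadoop.io.retry.Idempotent annotation is real; the trimmed interface below is illustrative, not quoted from this commit):

import java.io.IOException;
import org.apache.hadoop.io.retry.Idempotent;

// Trimmed illustration of a ClientProtocol-style method marked idempotent.
public interface MetaSaveProtocol {
  /**
   * Dumps the namenode's primary data structures to a file under
   * hadoop.log.dir. Overwrites the file if it exists, so a retried
   * call produces the same result as a single call.
   */
  @Idempotent
  void metaSave(String filename) throws IOException;
}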