HDFS-3522. If a namenode is in safemode, it should throw SafeModeException when getBlockLocations has zero locations. Contributed by Brandon Li
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1349088 13f79535-47bb-0310-9956-ffa450edef68
commit 543f86631b
parent eff9fa1aad
@@ -316,6 +316,9 @@ Branch-2 ( Unreleased changes )
 
     HDFS-3517. TestStartup should bind ephemeral ports. (eli)
 
+    HDFS-3522. If a namenode is in safemode, it should throw SafeModeException
+    when getBlockLocations has zero locations. (Brandon Li via szetszwo)
+
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
     HDFS-2185. HDFS portion of ZK-based FailoverController (todd)
@@ -1089,7 +1089,8 @@ void setOwner(String src, String username, String group)
   LocatedBlocks getBlockLocations(String clientMachine, String src,
       long offset, long length) throws AccessControlException,
       FileNotFoundException, UnresolvedLinkException, IOException {
-    LocatedBlocks blocks = getBlockLocations(src, offset, length, true, true);
+    LocatedBlocks blocks = getBlockLocations(src, offset, length, true, true,
+        true);
     if (blocks != null) {
       blockManager.getDatanodeManager().sortLocatedBlocks(
           clientMachine, blocks.getLocatedBlocks());
@@ -1103,8 +1104,8 @@ LocatedBlocks getBlockLocations(String clientMachine, String src,
    * @throws FileNotFoundException, UnresolvedLinkException, IOException
    */
   LocatedBlocks getBlockLocations(String src, long offset, long length,
-      boolean doAccessTime, boolean needBlockToken) throws FileNotFoundException,
-      UnresolvedLinkException, IOException {
+      boolean doAccessTime, boolean needBlockToken, boolean checkSafeMode)
+      throws FileNotFoundException, UnresolvedLinkException, IOException {
     if (isPermissionEnabled) {
       checkPathAccess(src, FsAction.READ);
     }
@@ -1124,6 +1125,15 @@ LocatedBlocks getBlockLocations(String src, long offset, long length,
                     Server.getRemoteIp(),
                     "open", src, null, null);
     }
+    if (checkSafeMode && isInSafeMode()) {
+      for (LocatedBlock b : ret.getLocatedBlocks()) {
+        // if safemode & no block locations yet then throw safemodeException
+        if ((b.getLocations() == null) || (b.getLocations().length == 0)) {
+          throw new SafeModeException("Zero blocklocations for " + src,
+              safeMode);
+        }
+      }
+    }
     return ret;
   }
 
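With this hunk, a caller that asks for block locations while the NameNode is in safemode and the blocks have no reported locations yet gets a SafeModeException (delivered to remote callers wrapped in a RemoteException) instead of an empty location list. A minimal client-side sketch of coping with that, assuming a retry loop is acceptable; the class name, retry count, and sleep interval are illustrative choices, not part of this patch:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.ipc.RemoteException;

public class SafeModeAwareReader {

  /**
   * Fetch block locations, retrying while the NameNode is still in safemode
   * and reports zero locations for the file's blocks.
   */
  public static BlockLocation[] locationsWithRetry(FileSystem fs, Path path)
      throws IOException, InterruptedException {
    FileStatus stat = fs.getFileStatus(path);
    for (int attempt = 0; attempt < 10; attempt++) {
      try {
        return fs.getFileBlockLocations(stat, 0, stat.getLen());
      } catch (SafeModeException e) {
        // Thrown directly when the call does not cross the RPC layer.
      } catch (RemoteException re) {
        // Over RPC the server-side exception arrives wrapped; unwrap and check.
        IOException unwrapped = re.unwrapRemoteException(SafeModeException.class);
        if (!(unwrapped instanceof SafeModeException)) {
          throw unwrapped; // some other failure: rethrow it
        }
      }
      Thread.sleep(5000); // wait for block reports / safemode exit, then retry
    }
    throw new IOException("Still no block locations for " + path);
  }

  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    BlockLocation[] locs = locationsWithRetry(fs, new Path(args[0]));
    System.out.println("Got " + locs.length + " located block(s) for " + args[0]);
  }
}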
@@ -277,7 +277,7 @@ void check(String parent, HdfsFileStatus file, Result res) throws IOException {
     // Get block locations without updating the file access time
     // and without block access tokens
     LocatedBlocks blocks = namenode.getNamesystem().getBlockLocations(path, 0,
-        fileLen, false, false);
+        fileLen, false, false, false);
     if (blocks == null) { // the file is deleted
       return;
     }
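The check() hunk above is fsck's block-checking path; it passes false for the new checkSafeMode argument so fsck can keep inspecting files while the NameNode is still in safemode. An ordinary client that wants to avoid tripping the new exception can instead probe the safemode state up front. A small sketch, assuming HDFS is the default file system; the class and method names are mine, while isInSafeMode() is the existing DistributedFileSystem call also used by the test below:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SafeModeProbe {

  /** Returns true if the NameNode currently reports that it is in safemode. */
  public static boolean namenodeInSafeMode(Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IOException("Default file system is not HDFS: " + fs.getUri());
    }
    // Query only; this does not enter or leave safemode.
    return ((DistributedFileSystem) fs).isInSafeMode();
  }
}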
@@ -23,6 +23,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -31,9 +32,11 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.test.GenericTestUtils;
 
 import static org.junit.Assert.*;
@@ -372,4 +375,76 @@ public void testSafeModeUtils() throws IOException {
     dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
     assertFalse("State was expected to be out of safemode.", dfs.isInSafeMode());
   }
+
+  @Test
+  public void testSafeModeWhenZeroBlockLocations() throws IOException {
+
+    try {
+      Path file1 = new Path("/tmp/testManualSafeMode/file1");
+      Path file2 = new Path("/tmp/testManualSafeMode/file2");
+
+      System.out.println("Created file1 and file2.");
+
+      // create two files with one block each.
+      DFSTestUtil.createFile(fs, file1, 1000, (short)1, 0);
+      DFSTestUtil.createFile(fs, file2, 2000, (short)1, 0);
+      checkGetBlockLocationsWorks(fs, file1);
+
+      NameNode namenode = cluster.getNameNode();
+
+      // manually set safemode.
+      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      assertTrue("should still be in SafeMode", namenode.isInSafeMode());
+      // getBlock locations should still work since block locations exists
+      checkGetBlockLocationsWorks(fs, file1);
+      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+      assertFalse("should not be in SafeMode", namenode.isInSafeMode());
+
+
+      // Now 2nd part of the tests where there aren't block locations
+      cluster.shutdownDataNodes();
+      cluster.shutdownNameNode(0);
+
+      // now bring up just the NameNode.
+      cluster.restartNameNode();
+      cluster.waitActive();
+
+      System.out.println("Restarted cluster with just the NameNode");
+
+      namenode = cluster.getNameNode();
+
+      assertTrue("No datanode is started. Should be in SafeMode",
+                 namenode.isInSafeMode());
+      FileStatus stat = fs.getFileStatus(file1);
+      try {
+        fs.getFileBlockLocations(stat, 0, 1000);
+        assertTrue("Should have got safemode exception", false);
+      } catch (SafeModeException e) {
+        // as expected
+      } catch (RemoteException re) {
+        if (!re.getClassName().equals(SafeModeException.class.getName()))
+          assertTrue("Should have got safemode exception", false);
+      }
+
+      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+      assertFalse("Should not be in safemode", namenode.isInSafeMode());
+      checkGetBlockLocationsWorks(fs, file1);
+
+    } finally {
+      if(fs != null) fs.close();
+      if(cluster!= null) cluster.shutdown();
+    }
+  }
+
+  void checkGetBlockLocationsWorks(FileSystem fs, Path fileName) throws IOException {
+    FileStatus stat = fs.getFileStatus(fileName);
+    try {
+      fs.getFileBlockLocations(stat, 0, 1000);
+    } catch (SafeModeException e) {
+      assertTrue("Should have not got safemode exception", false);
+    } catch (RemoteException re) {
+      assertTrue("Should have not got safemode exception", false);
+    }
+  }
 }
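The new test relies on fs, dfs, and cluster fields that are initialized in TestSafeMode's existing setup method, which is not part of this hunk. For readers following along, a hedged sketch of what such a MiniDFSCluster fixture typically looks like; the exact configuration and datanode count here are assumptions, not the test's actual setup:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Before;

public class SafeModeTestFixtureSketch {
  private Configuration conf;
  private MiniDFSCluster cluster;
  private FileSystem fs;
  private DistributedFileSystem dfs;

  @Before
  public void setUp() throws IOException {
    conf = new HdfsConfiguration();
    // One DataNode is enough for the single-replica files created in the test.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    dfs = (DistributedFileSystem) fs;
  }
}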
@@ -56,7 +56,7 @@ public static FSNamesystem getNamesystem(NameNode namenode) {
   public static LocatedBlocks getBlockLocations(NameNode namenode,
       String src, long offset, long length) throws IOException {
     return namenode.getNamesystem().getBlockLocations(
-        src, offset, length, false, true);
+        src, offset, length, false, true, true);
   }
 
   public static HdfsFileStatus getFileInfo(NameNode namenode, String src,