HDFS-4610. Reverting the patch since the Jenkins build was not run.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1477396 13f79535-47bb-0310-9956-ffa450edef68
parent fb30240b87
commit e2091275dc
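Editorial context, not part of the patch: the change being reverted (HDFS-4610) had replaced java.io.File permission calls with the static org.apache.hadoop.fs.FileUtil helpers; this revert restores the plain File calls. A minimal sketch of the two call styles that the hunks below swap, using an illustrative throwaway path:

// Illustrative sketch only -- not part of this commit.
import java.io.File;
import org.apache.hadoop.fs.FileUtil;

public class PermissionCallStyles {
  public static void main(String[] args) {
    File dir = new File("/tmp/example-storage-dir");  // hypothetical path

    // Style removed by this revert: static FileUtil helpers
    // (the HDFS-4610 change that this commit reverts).
    boolean readableViaUtil = FileUtil.canRead(dir);
    FileUtil.setWritable(dir, true);

    // Style restored by this revert: instance methods on java.io.File.
    boolean readableViaFile = dir.canRead();
    dir.setWritable(true);

    System.out.println(readableViaUtil + " " + readableViaFile);
  }
}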
@@ -332,9 +332,6 @@ Trunk (Unreleased)
     HDFS-4734. HDFS Tests that use ShellCommandFencer are broken on Windows.
     (Arpit Agarwal via suresh)
 
-    HDFS-4610. Use common utils FileUtil#setReadable/Writable/Executable &
-    FileUtil#canRead/Write/Execute. (Ivan Mitic via suresh)
-
   BREAKDOWN OF HDFS-347 SUBTASKS AND RELATED JIRAS
 
     HDFS-4353. Encapsulate connections to peers in Peer and PeerServer classes.
@@ -448,7 +448,7 @@ public StorageState analyzeStorage(StartupOption startOpt, Storage storage)
         LOG.warn(rootPath + "is not a directory");
         return StorageState.NON_EXISTENT;
       }
-      if (!FileUtil.canWrite(root)) {
+      if (!root.canWrite()) {
         LOG.warn("Cannot access storage directory " + rootPath);
         return StorageState.NON_EXISTENT;
       }
@@ -33,7 +33,6 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -129,7 +128,7 @@ void inspectDirectory(StorageDirectory sd) throws IOException {
   static long readCheckpointTime(StorageDirectory sd) throws IOException {
     File timeFile = NNStorage.getStorageFile(sd, NameNodeFile.TIME);
     long timeStamp = 0L;
-    if (timeFile.exists() && FileUtil.canRead(timeFile)) {
+    if (timeFile.exists() && timeFile.canRead()) {
       DataInputStream in = new DataInputStream(new FileInputStream(timeFile));
       try {
         timeStamp = in.readLong();
@@ -34,7 +34,6 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
@@ -231,8 +230,8 @@ void attemptRestoreRemovedStorage() {
       File root = sd.getRoot();
       LOG.info("currently disabled dir " + root.getAbsolutePath() +
                "; type="+sd.getStorageDirType()
-               + ";canwrite="+FileUtil.canWrite(root));
-      if(root.exists() && FileUtil.canWrite(root)) {
+               + ";canwrite="+root.canWrite());
+      if(root.exists() && root.canWrite()) {
         LOG.info("restoring dir " + sd.getRoot().getAbsolutePath());
         this.addStorageDir(sd); // restore
         this.removedStorageDirs.remove(sd);
@@ -506,7 +505,7 @@ public File getFsImageName(long txid) {
          dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
       sd = it.next();
       File fsImage = getStorageFile(sd, NameNodeFile.IMAGE, txid);
-      if(FileUtil.canRead(sd.getRoot()) && fsImage.exists())
+      if(sd.getRoot().canRead() && fsImage.exists())
         return fsImage;
     }
     return null;
@@ -723,7 +722,7 @@ File findImageFile(long txid) {
   private File findFile(NameNodeDirType dirType, String name) {
     for (StorageDirectory sd : dirIterable(dirType)) {
       File candidate = new File(sd.getCurrentDir(), name);
-      if (FileUtil.canRead(sd.getCurrentDir()) &&
+      if (sd.getCurrentDir().canRead() &&
           candidate.exists()) {
         return candidate;
       }
@@ -33,7 +33,6 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.Time;
@@ -103,7 +102,7 @@ static void downloadEditsToStorage(String fsName, RemoteEditLog log,
     assert !dstFiles.isEmpty() : "No checkpoint targets.";
 
     for (File f : dstFiles) {
-      if (f.exists() && FileUtil.canRead(f)) {
+      if (f.exists() && f.canRead()) {
         LOG.info("Skipping download of remote edit log " +
             log + " since it already is stored locally at " + f);
         return;
@@ -681,9 +681,9 @@ private String createPermissionsDiagnosisString(File path) {
       sb.append("\tabsolute:").append(path.getAbsolutePath()).append("\n");
       sb.append("\tpermissions: ");
       sb.append(path.isDirectory() ? "d": "-");
-      sb.append(FileUtil.canRead(path) ? "r" : "-");
-      sb.append(FileUtil.canWrite(path) ? "w" : "-");
-      sb.append(FileUtil.canExecute(path) ? "x" : "-");
+      sb.append(path.canRead() ? "r" : "-");
+      sb.append(path.canWrite() ? "w" : "-");
+      sb.append(path.canExecute() ? "x" : "-");
       sb.append("\n");
       path = path.getParentFile();
     }
@@ -31,7 +31,6 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.BlockReaderFactory;
@@ -92,10 +91,10 @@ public void setUp() throws Exception {
   @After
   public void tearDown() throws Exception {
     if(data_fail != null) {
-      FileUtil.setWritable(data_fail, true);
+      data_fail.setWritable(true);
     }
     if(failedDir != null) {
-      FileUtil.setWritable(failedDir, true);
+      failedDir.setWritable(true);
     }
     if(cluster != null) {
       cluster.shutdown();
@@ -31,7 +31,6 @@
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -89,8 +88,8 @@ public void setUp() throws Exception {
   @After
   public void tearDown() throws Exception {
     for (int i = 0; i < 3; i++) {
-      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+1)), true);
-      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+2)), true);
+      new File(dataDir, "data"+(2*i+1)).setExecutable(true);
+      new File(dataDir, "data"+(2*i+2)).setExecutable(true);
     }
     cluster.shutdown();
   }
@@ -132,8 +131,8 @@ public void testSuccessiveVolumeFailures() throws Exception {
      * fail. The client does not retry failed nodes even though
      * perhaps they could succeed because just a single volume failed.
      */
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, false));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));
+    assertTrue("Couldn't chmod local vol", dn1Vol1.setExecutable(false));
+    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(false));
 
     /*
      * Create file1 and wait for 3 replicas (ie all DNs can still
@@ -169,7 +168,7 @@ public void testSuccessiveVolumeFailures() throws Exception {
      * Now fail a volume on the third datanode. We should be able to get
      * three replicas since we've already identified the other failures.
      */
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol1, false));
+    assertTrue("Couldn't chmod local vol", dn3Vol1.setExecutable(false));
     Path file2 = new Path("/test2");
     DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file2, (short)3);
@@ -201,7 +200,7 @@ public void testSuccessiveVolumeFailures() throws Exception {
      * and that it's no longer up. Only wait for two replicas since
      * we'll never get a third.
      */
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol2, false));
+    assertTrue("Couldn't chmod local vol", dn3Vol2.setExecutable(false));
     Path file3 = new Path("/test3");
     DFSTestUtil.createFile(fs, file3, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file3, (short)2);
@@ -223,10 +222,10 @@ public void testSuccessiveVolumeFailures() throws Exception {
      * restart, so file creation should be able to succeed after
      * restoring the data directories and restarting the datanodes.
      */
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, true));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, true));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol1, true));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol2, true));
+    assertTrue("Couldn't chmod local vol", dn1Vol1.setExecutable(true));
+    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(true));
+    assertTrue("Couldn't chmod local vol", dn3Vol1.setExecutable(true));
+    assertTrue("Couldn't chmod local vol", dn3Vol2.setExecutable(true));
     cluster.restartDataNodes();
     cluster.waitActive();
     Path file4 = new Path("/test4");
@@ -262,8 +261,8 @@ public void testVolFailureStatsPreservedOnNNRestart() throws Exception {
     // third healthy so one node in the pipeline will not fail).
     File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, false));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));
+    assertTrue("Couldn't chmod local vol", dn1Vol1.setExecutable(false));
+    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(false));
 
     Path file1 = new Path("/test1");
     DFSTestUtil.createFile(fs, file1, 1024, (short)2, 1L);
@@ -77,8 +77,8 @@ public void setUp() throws Exception {
   @After
   public void tearDown() throws Exception {
     for (int i = 0; i < 3; i++) {
-      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+1)), true);
-      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+2)), true);
+      new File(dataDir, "data"+(2*i+1)).setExecutable(true);
+      new File(dataDir, "data"+(2*i+2)).setExecutable(true);
     }
     cluster.shutdown();
   }
@@ -152,7 +152,7 @@ public void testConfigureMinValidVolumes() throws Exception {
 
     // Fail a volume on the 2nd DN
     File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));
+    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(false));
 
     // Should only get two replicas (the first DN and the 3rd)
     Path file1 = new Path("/test1");
@@ -165,7 +165,7 @@ public void testConfigureMinValidVolumes() throws Exception {
 
     // If we restore the volume we should still only be able to get
     // two replicas since the DN is still considered dead.
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, true));
+    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(true));
     Path file2 = new Path("/test2");
     DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file2, (short)2);
@@ -27,7 +27,6 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -107,8 +106,8 @@ public void testShutdown() throws Exception {
       }
     } finally {
       // restore its old permission
-      FileUtil.setWritable(dir1, true);
-      FileUtil.setWritable(dir2, true);
+      dir1.setWritable(true);
+      dir2.setWritable(true);
     }
   }
 
@@ -157,7 +157,7 @@ public void testNameDirError() throws IOException {
 
       try {
         // Simulate the mount going read-only
-        FileUtil.setWritable(dir, false);
+        dir.setWritable(false);
         cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
             .format(false).build();
         fail("NN should have failed to start with " + dir + " set unreadable");
@@ -167,7 +167,7 @@ public void testNameDirError() throws IOException {
       } finally {
         cleanup(cluster);
         cluster = null;
-        FileUtil.setWritable(dir, true);
+        dir.setWritable(true);
       }
     }
   }
@@ -1825,7 +1825,7 @@ public void testCheckpointWithFailedStorageDir() throws Exception {
       StorageDirectory sd1 = storage.getStorageDir(1);
 
       currentDir = sd0.getCurrentDir();
-      FileUtil.setExecutable(currentDir, false);
+      currentDir.setExecutable(false);
 
       // Upload checkpoint when NN has a bad storage dir. This should
       // succeed and create the checkpoint in the good dir.
@@ -1835,7 +1835,7 @@ public void testCheckpointWithFailedStorageDir() throws Exception {
           new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2)));
 
       // Restore the good dir
-      FileUtil.setExecutable(currentDir, true);
+      currentDir.setExecutable(true);
       nn.restoreFailedStorage("true");
       nn.rollEditLog();
 
@@ -1846,7 +1846,7 @@ public void testCheckpointWithFailedStorageDir() throws Exception {
       assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
     } finally {
       if (currentDir != null) {
-        FileUtil.setExecutable(currentDir, true);
+        currentDir.setExecutable(true);
       }
       cleanup(secondary);
       secondary = null;
@@ -1896,7 +1896,7 @@ public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
       StorageDirectory sd0 = storage.getStorageDir(0);
       assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
       currentDir = sd0.getCurrentDir();
-      FileUtil.setExecutable(currentDir, false);
+      currentDir.setExecutable(false);
 
       // Try to upload checkpoint -- this should fail since there are no
       // valid storage dirs
@@ -1909,7 +1909,7 @@ public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
       }
 
       // Restore the good dir
-      FileUtil.setExecutable(currentDir, true);
+      currentDir.setExecutable(true);
       nn.restoreFailedStorage("true");
       nn.rollEditLog();
 
@@ -1920,7 +1920,7 @@ public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
       assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
     } finally {
       if (currentDir != null) {
-        FileUtil.setExecutable(currentDir, true);
+        currentDir.setExecutable(true);
       }
       cleanup(secondary);
       secondary = null;
@@ -881,14 +881,14 @@ public void testFailedOpen() throws Exception {
     logDir.mkdirs();
     FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir);
     try {
-      FileUtil.setWritable(logDir, false);
+      logDir.setWritable(false);
       log.openForWrite();
       fail("Did no throw exception on only having a bad dir");
     } catch (IOException ioe) {
       GenericTestUtils.assertExceptionContains(
           "too few journals successfully started", ioe);
     } finally {
-      FileUtil.setWritable(logDir, true);
+      logDir.setWritable(true);
       log.close();
     }
   }
@@ -28,7 +28,6 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
||||||
@ -107,10 +106,10 @@ public void testPurgingWithNameEditsDirAfterFailure()
|
|||||||
getInProgressEditsFileName(5));
|
getInProgressEditsFileName(5));
|
||||||
|
|
||||||
LOG.info("Failing first storage dir by chmodding it");
|
LOG.info("Failing first storage dir by chmodding it");
|
||||||
FileUtil.setExecutable(sd0, false);
|
sd0.setExecutable(false);
|
||||||
doSaveNamespace(nn);
|
doSaveNamespace(nn);
|
||||||
LOG.info("Restoring accessibility of first storage dir");
|
LOG.info("Restoring accessibility of first storage dir");
|
||||||
FileUtil.setExecutable(sd0, true);
|
sd0.setExecutable(true);
|
||||||
|
|
||||||
LOG.info("nothing should have been purged in first storage dir");
|
LOG.info("nothing should have been purged in first storage dir");
|
||||||
assertGlobEquals(cd0, "fsimage_\\d*",
|
assertGlobEquals(cd0, "fsimage_\\d*",
|
||||||
@@ -139,7 +138,7 @@ public void testPurgingWithNameEditsDirAfterFailure()
     assertGlobEquals(cd0, "edits_.*",
         getInProgressEditsFileName(9));
   } finally {
-    FileUtil.setExecutable(sd0, true);
+    sd0.setExecutable(true);
 
     LOG.info("Shutting down...");
     if (cluster != null) {
|
Loading…
Reference in New Issue
Block a user