HDFS-4248. Renaming directories may incorrectly remove the paths in leases under the tree. Contributed by daryn
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1416064 13f79535-47bb-0310-9956-ffa450edef68
parent 64fe19e0d4
commit 88eea21572
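In short, the patch makes lease paths follow renames: FSDirectory.unprotectedRenameTo now updates the moved leases itself, and LeaseManager.changeLease simply replaces the src prefix of each affected lease path with dst. Below is a condensed, non-authoritative sketch of the scenario the new testLeaseAfterRename (added at the end of this diff) covers; the class name and main() wrapper are illustrative only, and it assumes the hadoop-hdfs test utilities (MiniDFSCluster, NameNodeAdapter) are on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;

public class LeaseAfterRenameSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
      FileSystem fs = cluster.getFileSystem();
      Path dir = new Path("/test-d");
      Path file = new Path(dir, "test-file");
      fs.mkdirs(dir);

      // Writing to the file puts it under construction, so the NameNode holds a lease for it.
      // The stream is intentionally left open so the lease stays active.
      FSDataOutputStream out = fs.create(file);
      out.writeBytes("something");

      // Rename the parent directory while the file is still open.
      Path newDir = new Path("/test-d-other");
      fs.rename(dir, newDir);

      // With this fix, the lease is recorded under the renamed path and no longer under the old one.
      LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
      System.out.println("lease at new path: "
          + (lm.getLeaseByPath(new Path(newDir, "test-file").toString()) != null));
      System.out.println("lease at old path: "
          + (lm.getLeaseByPath(file.toString()) != null));
    } finally {
      cluster.shutdown();
    }
  }
}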
@@ -2037,6 +2037,9 @@ Release 0.23.6 - UNRELEASED

    HDFS-4247. saveNamespace should be tolerant of dangling lease (daryn)

    HDFS-4248. Renaming directories may incorrectly remove the paths in leases
    under the tree. (daryn via szetszwo)

Release 0.23.5 - UNRELEASED

  INCOMPATIBLE CHANGES
@@ -575,6 +575,8 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp)
        // update modification time of dst and the parent of src
        srcInodes[srcInodes.length-2].setModificationTime(timestamp);
        dstInodes[dstInodes.length-2].setModificationTime(timestamp);
        // update moved leases with new filename
        getFSNamesystem().unprotectedChangeLease(src, dst);
        return true;
      }
    } finally {
@@ -729,6 +731,8 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
      }
      srcInodes[srcInodes.length - 2].setModificationTime(timestamp);
      dstInodes[dstInodes.length - 2].setModificationTime(timestamp);
      // update moved lease with new filename
      getFSNamesystem().unprotectedChangeLease(src, dst);

      // Collect the blocks and remove the lease for previous dst
      int filesDeleted = 0;
@@ -31,7 +31,6 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
@@ -360,10 +359,8 @@ private void applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
    }
    case OP_RENAME_OLD: {
      RenameOldOp renameOp = (RenameOldOp)op;
      HdfsFileStatus dinfo = fsDir.getFileInfo(renameOp.dst, false);
      fsDir.unprotectedRenameTo(renameOp.src, renameOp.dst,
                                renameOp.timestamp);
      fsNamesys.unprotectedChangeLease(renameOp.src, renameOp.dst, dinfo);
      break;
    }
    case OP_DELETE: {
@@ -433,11 +430,8 @@ private void applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
    }
    case OP_RENAME: {
      RenameOp renameOp = (RenameOp)op;

      HdfsFileStatus dinfo = fsDir.getFileInfo(renameOp.dst, false);
      fsDir.unprotectedRenameTo(renameOp.src, renameOp.dst,
                                renameOp.timestamp, renameOp.options);
      fsNamesys.unprotectedChangeLease(renameOp.src, renameOp.dst, dinfo);
      break;
    }
    case OP_GET_DELEGATION_TOKEN: {
@@ -2583,15 +2583,15 @@ private boolean renameToInternal(String src, String dst)
    if (isPermissionEnabled) {
      //We should not be doing this. This is move() not renameTo().
      //but for now,
      //NOTE: yes, this is bad! it's assuming much lower level behavior
      //      of rewriting the dst
      String actualdst = dir.isDir(dst)?
          dst + Path.SEPARATOR + new Path(src).getName(): dst;
      checkParentAccess(src, FsAction.WRITE);
      checkAncestorAccess(actualdst, FsAction.WRITE);
    }

    HdfsFileStatus dinfo = dir.getFileInfo(dst, false);
    if (dir.renameTo(src, dst)) {
      unprotectedChangeLease(src, dst, dinfo); // update lease with new filename
      return true;
    }
    return false;
@@ -2642,9 +2642,7 @@ private void renameToInternal(String src, String dst,
      checkAncestorAccess(dst, FsAction.WRITE);
    }

    HdfsFileStatus dinfo = dir.getFileInfo(dst, false);
    dir.renameTo(src, dst, options);
    unprotectedChangeLease(src, dst, dinfo); // update lease with new filename
  }

  /**
@@ -4885,31 +4883,9 @@ private void updatePipelineInternal(String clientName, ExtendedBlock oldBlock,

  // rename was successful. If any part of the renamed subtree had
  // files that were being written to, update with new filename.
  void unprotectedChangeLease(String src, String dst, HdfsFileStatus dinfo) {
    String overwrite;
    String replaceBy;
  void unprotectedChangeLease(String src, String dst) {
    assert hasWriteLock();

    boolean destinationExisted = true;
    if (dinfo == null) {
      destinationExisted = false;
    }

    if (destinationExisted && dinfo.isDir()) {
      Path spath = new Path(src);
      Path parent = spath.getParent();
      if (parent.isRoot()) {
        overwrite = parent.toString();
      } else {
        overwrite = parent.toString() + Path.SEPARATOR;
      }
      replaceBy = dst + Path.SEPARATOR;
    } else {
      overwrite = src;
      replaceBy = dst;
    }

    leaseManager.changeLease(src, dst, overwrite, replaceBy);
    leaseManager.changeLease(src, dst);
  }

  /**
@@ -331,22 +331,19 @@ long getLastUpdate() {
    }
  }

  synchronized void changeLease(String src, String dst,
      String overwrite, String replaceBy) {
  synchronized void changeLease(String src, String dst) {
    if (LOG.isDebugEnabled()) {
      LOG.debug(getClass().getSimpleName() + ".changelease: " +
                " src=" + src + ", dest=" + dst +
                ", overwrite=" + overwrite +
                ", replaceBy=" + replaceBy);
                " src=" + src + ", dest=" + dst);
    }

    final int len = overwrite.length();
    final int len = src.length();
    for(Map.Entry<String, Lease> entry
        : findLeaseWithPrefixPath(src, sortedLeasesByPath).entrySet()) {
      final String oldpath = entry.getKey();
      final Lease lease = entry.getValue();
      //overwrite must be a prefix of oldpath
      final String newpath = replaceBy + oldpath.substring(len);
      // replace stem of src with new destination
      final String newpath = dst + oldpath.substring(len);
      if (LOG.isDebugEnabled()) {
        LOG.debug("changeLease: replacing " + oldpath + " with " + newpath);
      }
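The core of the fix is visible above: changeLease no longer takes caller-supplied overwrite/replaceBy prefixes; for every lease path found under src it simply swaps the src prefix for dst. A standalone illustration of that rewrite on plain strings (not HDFS code; the class and method names here are made up for the example):

public class LeasePathRewriteSketch {
  // Mirrors the new computation above: newpath = dst + oldpath.substring(src.length()).
  // In the real LeaseManager, findLeaseWithPrefixPath is expected to return only
  // lease paths that start with src, so the substring is always the part below src.
  static String rewrite(String oldpath, String src, String dst) {
    return dst + oldpath.substring(src.length());
  }

  public static void main(String[] args) {
    // Renaming directory /test-d to /test-d-other moves the open file's lease path with it.
    System.out.println(rewrite("/test-d/test-file", "/test-d", "/test-d-other"));
    // -> /test-d-other/test-file
  }
}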
@@ -30,7 +30,9 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -49,6 +51,10 @@ static boolean hasLease(MiniDFSCluster cluster, Path src) {
        ).getLeaseByPath(src.toString()) != null;
  }

  static int leaseCount(MiniDFSCluster cluster) {
    return NameNodeAdapter.getLeaseManager(cluster.getNamesystem()).countLease();
  }

  static final String dirString = "/test/lease";
  final Path dir = new Path(dirString);
  static final Log LOG = LogFactory.getLog(TestLease.class);
@@ -126,6 +132,96 @@ public void testLeaseAbort() throws Exception {
    }
  }

  @Test
  public void testLeaseAfterRename() throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
      Path p = new Path("/test-file");
      Path d = new Path("/test-d");
      Path d2 = new Path("/test-d-other");

      // open a file to get a lease
      FileSystem fs = cluster.getFileSystem();
      FSDataOutputStream out = fs.create(p);
      out.writeBytes("something");
      //out.hsync();
      Assert.assertTrue(hasLease(cluster, p));
      Assert.assertEquals(1, leaseCount(cluster));

      // just to ensure first fs doesn't have any logic to twiddle leases
      DistributedFileSystem fs2 = (DistributedFileSystem) FileSystem.newInstance(fs.getUri(), fs.getConf());

      // rename the file into an existing dir
      LOG.info("DMS: rename file into dir");
      Path pRenamed = new Path(d, p.getName());
      fs2.mkdirs(d);
      fs2.rename(p, pRenamed);
      Assert.assertFalse(p+" exists", fs2.exists(p));
      Assert.assertTrue(pRenamed+" not found", fs2.exists(pRenamed));
      Assert.assertFalse("has lease for "+p, hasLease(cluster, p));
      Assert.assertTrue("no lease for "+pRenamed, hasLease(cluster, pRenamed));
      Assert.assertEquals(1, leaseCount(cluster));

      // rename the parent dir to a new non-existent dir
      LOG.info("DMS: rename parent dir");
      Path pRenamedAgain = new Path(d2, pRenamed.getName());
      fs2.rename(d, d2);
      // src gone
      Assert.assertFalse(d+" exists", fs2.exists(d));
      Assert.assertFalse("has lease for "+pRenamed, hasLease(cluster, pRenamed));
      // dst checks
      Assert.assertTrue(d2+" not found", fs2.exists(d2));
      Assert.assertTrue(pRenamedAgain+" not found", fs2.exists(pRenamedAgain));
      Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
      Assert.assertEquals(1, leaseCount(cluster));

      // rename the parent dir to existing dir
      // NOTE: rename w/o options moves paths into existing dir
      LOG.info("DMS: rename parent again");
      pRenamed = pRenamedAgain;
      pRenamedAgain = new Path(new Path(d, d2.getName()), p.getName());
      fs2.mkdirs(d);
      fs2.rename(d2, d);
      // src gone
      Assert.assertFalse(d2+" exists", fs2.exists(d2));
      Assert.assertFalse("no lease for "+pRenamed, hasLease(cluster, pRenamed));
      // dst checks
      Assert.assertTrue(d+" not found", fs2.exists(d));
      Assert.assertTrue(pRenamedAgain +" not found", fs2.exists(pRenamedAgain));
      Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
      Assert.assertEquals(1, leaseCount(cluster));

      // rename with opts to non-existent dir
      pRenamed = pRenamedAgain;
      pRenamedAgain = new Path(d2, p.getName());
      fs2.rename(pRenamed.getParent(), d2, Options.Rename.OVERWRITE);
      // src gone
      Assert.assertFalse(pRenamed.getParent() +" not found", fs2.exists(pRenamed.getParent()));
      Assert.assertFalse("has lease for "+pRenamed, hasLease(cluster, pRenamed));
      // dst checks
      Assert.assertTrue(d2+" not found", fs2.exists(d2));
      Assert.assertTrue(pRenamedAgain+" not found", fs2.exists(pRenamedAgain));
      Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
      Assert.assertEquals(1, leaseCount(cluster));

      // rename with opts to existing dir
      // NOTE: rename with options will not move paths into the existing dir
      pRenamed = pRenamedAgain;
      pRenamedAgain = new Path(d, p.getName());
      fs2.rename(pRenamed.getParent(), d, Options.Rename.OVERWRITE);
      // src gone
      Assert.assertFalse(pRenamed.getParent() +" not found", fs2.exists(pRenamed.getParent()));
      Assert.assertFalse("has lease for "+pRenamed, hasLease(cluster, pRenamed));
      // dst checks
      Assert.assertTrue(d+" not found", fs2.exists(d));
      Assert.assertTrue(pRenamedAgain+" not found", fs2.exists(pRenamedAgain));
      Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
      Assert.assertEquals(1, leaseCount(cluster));
    } finally {
      cluster.shutdown();
    }
  }

  @Test
  public void testLease() throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();