HDFS-8269. getBlockLocations() does not resolve the .reserved path and generates incorrect edit logs when updating the atime. Contributed by Haohui Mai.

Haohui Mai 2015-04-29 11:12:45 -07:00
parent 7947e5b53b
commit 3dd6395bb2
5 changed files with 188 additions and 23 deletions
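For context, the bug is easiest to see from the client side: a file can be opened either by its regular path or by its /.reserved/.inodes/<inode-id> alias, and both routes call FSNamesystem#getBlockLocations on the NameNode; before this fix, the resulting atime edit-log entry could record the raw reserved path instead of the resolved one. A minimal client-side sketch, assuming a running cluster with a file /foo whose inode id is 16386 (both values hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReservedPathSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Both paths name the same file, assuming /foo has inode id 16386
    // (hypothetical values). Each open() drives getBlockLocations() on
    // the NameNode and, once the atime precision window has passed, an
    // access-time update that must be logged against "/foo", never
    // against the raw /.reserved path.
    FSDataInputStream byName = fs.open(new Path("/foo"));
    FSDataInputStream byInode = fs.open(new Path("/.reserved/.inodes/16386"));

    byName.close();
    byInode.close();
  }
}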

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -635,6 +635,9 @@ Release 2.7.1 - UNRELEASED
     HDFS-8273. FSNamesystem#Delete() should not call logSync() when holding the
     lock. (wheat9)
 
+    HDFS-8269. getBlockLocations() does not resolve the .reserved path and
+    generates incorrect edit logs when updating the atime. (wheat9)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -1697,13 +1697,14 @@ void setOwner(String src, String username, String group)
   }
 
   static class GetBlockLocationsResult {
-    final INodesInPath iip;
+    final boolean updateAccessTime;
     final LocatedBlocks blocks;
     boolean updateAccessTime() {
-      return iip != null;
+      return updateAccessTime;
     }
-    private GetBlockLocationsResult(INodesInPath iip, LocatedBlocks blocks) {
-      this.iip = iip;
+    private GetBlockLocationsResult(
+        boolean updateAccessTime, LocatedBlocks blocks) {
+      this.updateAccessTime = updateAccessTime;
       this.blocks = blocks;
     }
   }
@@ -1712,34 +1713,58 @@ private GetBlockLocationsResult(INodesInPath iip, LocatedBlocks blocks) {
   /**
    * Get block locations within the specified range.
    * @see ClientProtocol#getBlockLocations(String, long, long)
    */
-  LocatedBlocks getBlockLocations(String clientMachine, String src,
+  LocatedBlocks getBlockLocations(String clientMachine, String srcArg,
       long offset, long length) throws IOException {
     checkOperation(OperationCategory.READ);
     GetBlockLocationsResult res = null;
+    FSPermissionChecker pc = getPermissionChecker();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      res = getBlockLocations(src, offset, length, true, true);
+      res = getBlockLocations(pc, srcArg, offset, length, true, true);
     } catch (AccessControlException e) {
-      logAuditEvent(false, "open", src);
+      logAuditEvent(false, "open", srcArg);
       throw e;
     } finally {
       readUnlock();
     }
 
-    logAuditEvent(true, "open", src);
+    logAuditEvent(true, "open", srcArg);
 
     if (res.updateAccessTime()) {
+      byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(
+          srcArg);
+      String src = srcArg;
       writeLock();
       final long now = now();
       try {
         checkOperation(OperationCategory.WRITE);
-        INode inode = res.iip.getLastINode();
-        boolean updateAccessTime = now > inode.getAccessTime() +
-            getAccessTimePrecision();
+        /**
+         * Resolve the path again and update the atime only when the file
+         * exists.
+         *
+         * XXX: Races can still occur even after resolving the path again.
+         * For example:
+         *
+         * <ul>
+         *   <li>Get the block location for "/a/b"</li>
+         *   <li>Rename "/a/b" to "/c/b"</li>
+         *   <li>The second resolution still points to "/a/b", which is
+         *   wrong.</li>
+         * </ul>
+         *
+         * The behavior is incorrect but consistent with the one before
+         * HDFS-7463. A better fix is to change the edit log of SetTime to
+         * use inode id instead of a path.
+         */
+        src = dir.resolvePath(pc, srcArg, pathComponents);
+        final INodesInPath iip = dir.getINodesInPath(src, true);
+        INode inode = iip.getLastINode();
+        boolean updateAccessTime = inode != null &&
+            now > inode.getAccessTime() + getAccessTimePrecision();
         if (!isInSafeMode() && updateAccessTime) {
           boolean changed = FSDirAttrOp.setTimes(dir,
-              inode, -1, now, false, res.iip.getLatestSnapshotId());
+              inode, -1, now, false, iip.getLatestSnapshotId());
           if (changed) {
             getEditLog().logTimes(src, -1, now);
           }
@@ -1773,8 +1798,8 @@ LocatedBlocks getBlockLocations(String clientMachine, String src,
    * @throws IOException
    */
   GetBlockLocationsResult getBlockLocations(
-      String src, long offset, long length, boolean needBlockToken,
-      boolean checkSafeMode) throws IOException {
+      FSPermissionChecker pc, String src, long offset, long length,
+      boolean needBlockToken, boolean checkSafeMode) throws IOException {
     if (offset < 0) {
       throw new HadoopIllegalArgumentException(
           "Negative offset is not supported. File: " + src);
@@ -1784,7 +1809,7 @@ GetBlockLocationsResult getBlockLocations(
           "Negative length is not supported. File: " + src);
     }
     final GetBlockLocationsResult ret = getBlockLocationsInt(
-        src, offset, length, needBlockToken);
+        pc, src, offset, length, needBlockToken);
 
     if (checkSafeMode && isInSafeMode()) {
       for (LocatedBlock b : ret.blocks.getLocatedBlocks()) {
@@ -1805,12 +1830,12 @@ GetBlockLocationsResult getBlockLocations(
   }
 
   private GetBlockLocationsResult getBlockLocationsInt(
-      final String srcArg, long offset, long length, boolean needBlockToken)
+      FSPermissionChecker pc, final String srcArg, long offset, long length,
+      boolean needBlockToken)
       throws IOException {
     String src = srcArg;
-    FSPermissionChecker pc = getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = dir.resolvePath(pc, src, pathComponents);
+    src = dir.resolvePath(pc, srcArg, pathComponents);
     final INodesInPath iip = dir.getINodesInPath(src, true);
     final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
     if (isPermissionEnabled) {
@@ -1846,7 +1871,7 @@ private GetBlockLocationsResult getBlockLocationsInt(
     boolean updateAccessTime = isAccessTimeSupported() && !isInSafeMode()
         && !iip.isSnapshot()
         && now > inode.getAccessTime() + getAccessTimePrecision();
-    return new GetBlockLocationsResult(updateAccessTime ? iip : null, blocks);
+    return new GetBlockLocationsResult(updateAccessTime, blocks);
   }
 
   /**

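The heart of the FSNamesystem change above is a check-then-recheck pattern: the cheap atime check runs under the read lock, the lock is released, and the path must therefore be resolved a second time under the write lock before any mutation, tolerating a file that has vanished in between. A minimal sketch of that pattern over a plain ReentrantReadWriteLock; the resolve/isAtimeStale/setAtime helpers are hypothetical stand-ins, not HDFS APIs:

import java.util.concurrent.locks.ReentrantReadWriteLock;

class AtimeUpdaterSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  void open(String path) {
    boolean wantsAtimeUpdate;
    lock.readLock().lock();
    try {
      Object inode = resolve(path);           // first resolution
      wantsAtimeUpdate = inode != null && isAtimeStale(inode);
    } finally {
      lock.readLock().unlock();
    }
    if (!wantsAtimeUpdate) {
      return;
    }
    // The lock was released above, so the namespace may have changed
    // (delete, rename). Resolve again and tolerate a missing target.
    lock.writeLock().lock();
    try {
      Object inode = resolve(path);           // second resolution
      if (inode != null && isAtimeStale(inode)) {
        setAtime(inode, System.currentTimeMillis());
      }
    } finally {
      lock.writeLock().unlock();
    }
  }

  // Hypothetical helpers standing in for FSDirectory/INode operations.
  private Object resolve(String path) { return new Object(); }
  private boolean isAtimeStale(Object inode) { return true; }
  private void setAtime(Object inode, long now) { /* log the edit here */ }
}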
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java

@@ -484,7 +484,9 @@ private LocatedBlocks getBlockLocations(String path, HdfsFileStatus file)
     FSNamesystem fsn = namenode.getNamesystem();
     fsn.readLock();
     try {
-      blocks = fsn.getBlockLocations(path, 0, fileLen, false, false).blocks;
+      blocks = fsn.getBlockLocations(
+          fsn.getPermissionChecker(), path, 0, fileLen, false, false)
+          .blocks;
     } catch (FileNotFoundException fnfe) {
       blocks = null;
     } finally {

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

@@ -24,6 +24,7 @@
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyLong;
 import static org.mockito.Matchers.anyString;
@@ -1156,8 +1157,9 @@ public void testFsckFileNotFound() throws Exception {
     DatanodeManager dnManager = mock(DatanodeManager.class);
 
     when(namenode.getNamesystem()).thenReturn(fsName);
-    when(fsName.getBlockLocations(
-        anyString(), anyLong(), anyLong(), anyBoolean(), anyBoolean()))
+    when(fsName.getBlockLocations(any(FSPermissionChecker.class), anyString(),
+        anyLong(), anyLong(),
+        anyBoolean(), anyBoolean()))
         .thenThrow(new FileNotFoundException());
     when(fsName.getBlockManager()).thenReturn(blockManager);
     when(blockManager.getDatanodeManager()).thenReturn(dnManager);

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java

@@ -0,0 +1,133 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import org.apache.commons.io.Charsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

import java.io.IOException;
import java.util.ArrayList;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY;
import static org.apache.hadoop.util.Time.now;
import static org.mockito.Mockito.anyLong;
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

public class TestGetBlockLocations {
  private static final String FILE_NAME = "foo";
  private static final String FILE_PATH = "/" + FILE_NAME;
  private static final long MOCK_INODE_ID = 16386;
  private static final String RESERVED_PATH =
      "/.reserved/.inodes/" + MOCK_INODE_ID;

  @Test(timeout = 30000)
  public void testResolveReservedPath() throws IOException {
    FSNamesystem fsn = setupFileSystem();
    FSEditLog editlog = fsn.getEditLog();
    fsn.getBlockLocations("dummy", RESERVED_PATH, 0, 1024);
    verify(editlog).logTimes(eq(FILE_PATH), anyLong(), anyLong());
    fsn.close();
  }

  @Test(timeout = 30000)
  public void testGetBlockLocationsRacingWithDelete() throws IOException {
    FSNamesystem fsn = spy(setupFileSystem());
    final FSDirectory fsd = fsn.getFSDirectory();
    FSEditLog editlog = fsn.getEditLog();

    doAnswer(new Answer<Void>() {
      @Override
      public Void answer(InvocationOnMock invocation) throws Throwable {
        INodesInPath iip = fsd.getINodesInPath(FILE_PATH, true);
        FSDirDeleteOp.delete(fsd, iip, new INode.BlocksMapUpdateInfo(),
                             new ArrayList<INode>(), now());
        invocation.callRealMethod();
        return null;
      }
    }).when(fsn).writeLock();
    fsn.getBlockLocations("dummy", RESERVED_PATH, 0, 1024);

    verify(editlog, never()).logTimes(anyString(), anyLong(), anyLong());
    fsn.close();
  }

  @Test(timeout = 30000)
  public void testGetBlockLocationsRacingWithRename() throws IOException {
    FSNamesystem fsn = spy(setupFileSystem());
    final FSDirectory fsd = fsn.getFSDirectory();
    FSEditLog editlog = fsn.getEditLog();
    final String DST_PATH = "/bar";
    final boolean[] renamed = new boolean[1];

    doAnswer(new Answer<Void>() {
      @Override
      public Void answer(InvocationOnMock invocation) throws Throwable {
        invocation.callRealMethod();
        if (!renamed[0]) {
          FSDirRenameOp.renameTo(fsd, fsd.getPermissionChecker(), FILE_PATH,
                                 DST_PATH, new INode.BlocksMapUpdateInfo(),
                                 false);
          renamed[0] = true;
        }
        return null;
      }
    }).when(fsn).writeLock();
    fsn.getBlockLocations("dummy", RESERVED_PATH, 0, 1024);

    verify(editlog).logTimes(eq(DST_PATH), anyLong(), anyLong());
    fsn.close();
  }

  private static FSNamesystem setupFileSystem() throws IOException {
    Configuration conf = new Configuration();
    conf.setLong(DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1L);
    FSEditLog editlog = mock(FSEditLog.class);
    FSImage image = mock(FSImage.class);
    when(image.getEditLog()).thenReturn(editlog);
    final FSNamesystem fsn = new FSNamesystem(conf, image, true);

    final FSDirectory fsd = fsn.getFSDirectory();
    INodesInPath iip = fsd.getINodesInPath("/", true);
    PermissionStatus perm = new PermissionStatus(
        "hdfs", "supergroup",
        FsPermission.createImmutable((short) 0x1ff));
    final INodeFile file = new INodeFile(
        MOCK_INODE_ID, FILE_NAME.getBytes(Charsets.UTF_8),
        perm, 1, 1, new BlockInfoContiguous[] {}, (short) 1,
        DFS_BLOCK_SIZE_DEFAULT);

    fsn.getFSDirectory().addINode(iip, file);

    return fsn;
  }
}
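A note on the test technique above: the delete and rename races are injected by stubbing writeLock() on a Mockito spy, so a competing namespace mutation runs just before (or just after) the real lock acquisition. A self-contained sketch of the trick; the Service class and its method are hypothetical, not HDFS code:

import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;

public class RaceInjectionSketch {
  static class Service {
    void writeLock() { System.out.println("real writeLock()"); }
  }

  public static void main(String[] args) {
    Service svc = spy(new Service());
    doAnswer(new Answer<Void>() {
      @Override
      public Void answer(InvocationOnMock invocation) throws Throwable {
        // A competing mutation sneaks in ahead of the caller, then the
        // real method still runs.
        System.out.println("competing mutation");
        invocation.callRealMethod();
        return null;
      }
    }).when(svc).writeLock();

    svc.writeLock(); // prints "competing mutation", then "real writeLock()"
  }
}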