MAPREDUCE-2624. Update RAID for HDFS-2107.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1140942 13f79535-47bb-0310-9956-ffa450edef68
commit 61fa4153dc
parent 09b6f98de4
@@ -312,6 +312,8 @@ Trunk (unreleased changes)
 
     MAPREDUCE-2620. Update RAID for HDFS-2087. (szetszwo)
 
+    MAPREDUCE-2624. Update RAID for HDFS-2107. (szetszwo)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.IOException;
 import java.util.Collection;
@@ -36,6 +36,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.namenode.*;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.raid.RaidNode;
@@ -105,7 +106,7 @@ DatanodeDescriptor[] chooseTarget(String srcPath, int numOfReplicas,
   }
 
   @Override
-  DatanodeDescriptor[] chooseTarget(String srcPath, int numOfReplicas,
+  public DatanodeDescriptor[] chooseTarget(String srcPath, int numOfReplicas,
       DatanodeDescriptor writer, List<DatanodeDescriptor> chosenNodes,
       boolean returnChosenNodes,
       HashMap<Node, Node> excludedNodes, long blocksize) {
@@ -477,8 +478,8 @@ static class CachedLocatedBlocks extends Cache<String, List<LocatedBlock>> {
     }
     @Override
     public List<LocatedBlock> getDirectly(String file) throws IOException {
-      long len = namesystem.getFileInfo(file, true).getLen();
-      List<LocatedBlock> result = namesystem.getBlockLocations(
+      long len = NameNodeRaidUtil.getFileInfo(namesystem, file, true).getLen();
+      List<LocatedBlock> result = NameNodeRaidUtil.getBlockLocations(namesystem,
           file, 0L, len, false, false).getLocatedBlocks();
       if (result == null || result.isEmpty()) {
         result = new ArrayList<LocatedBlock>();
@@ -542,7 +543,7 @@ String getSourceFile(String parity, String prefix) throws IOException {
     }
     // remove the prefix
     String src = parity.substring(prefix.length());
-    if (namesystem.dir.getFileInfo(src, true) == null) {
+    if (NameNodeRaidUtil.getFileInfo(namesystem.dir, src, true) == null) {
       return null;
     }
     return src;
@@ -574,7 +575,7 @@ String getParityFile(String src) throws IOException {
   private String getParityFile(String parityPrefix, String src)
       throws IOException {
     String parity = parityPrefix + src;
-    if (namesystem.dir.getFileInfo(parity, true) == null) {
+    if (NameNodeRaidUtil.getFileInfo(namesystem.dir, parity, true) == null) {
       return null;
     }
     return parity;
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.*;
+
+import org.apache.hadoop.classification.*;
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.hdfs.protocol.*;
+import org.apache.hadoop.security.AccessControlException;
+
+/** Utilities used by RAID for accessing NameNode. */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class NameNodeRaidUtil {
+  /** Accessing FSDirectory.getFileInfo(..) */
+  public static HdfsFileStatus getFileInfo(final FSDirectory dir,
+      final String src, final boolean resolveLink
+      ) throws UnresolvedLinkException {
+    return dir.getFileInfo(src, resolveLink);
+  }
+
+  /** Accessing FSNamesystem.getFileInfo(..) */
+  public static HdfsFileStatus getFileInfo(final FSNamesystem namesystem,
+      final String src, final boolean resolveLink
+      ) throws AccessControlException, UnresolvedLinkException {
+    return namesystem.getFileInfo(src, resolveLink);
+  }
+
+  /** Accessing FSNamesystem.getBlockLocations(..) */
+  public static LocatedBlocks getBlockLocations(final FSNamesystem namesystem,
+      final String src, final long offset, final long length,
+      final boolean doAccessTime, final boolean needBlockToken
+      ) throws FileNotFoundException, UnresolvedLinkException, IOException {
+    return namesystem.getBlockLocations(src, offset, length,
+        doAccessTime, needBlockToken);
+  }
+}
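NameNodeRaidUtil stays in the org.apache.hadoop.hdfs.server.namenode package so that the RAID code, now compiled into org.apache.hadoop.hdfs.server.blockmanagement after the HDFS-2107 package move, can still reach NameNode internals through public static forwarders. A minimal sketch of the resulting call pattern follows; the class RaidUtilExample and its method blocksOf are hypothetical illustrations, not part of the commit, while the NameNodeRaidUtil calls mirror the getDirectly(..) and getBlocks(..) changes above.

// Hypothetical illustration (not part of the commit): reading a file's
// length and block list through the facade.
package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdfs.protocol.LocatedBlock;

class RaidUtilExample {
  static List<LocatedBlock> blocksOf(final FSNamesystem namesystem,
      final String file) throws IOException {
    // Resolve symlinks (third argument = true) and take the file length.
    final long len = NameNodeRaidUtil.getFileInfo(namesystem, file, true).getLen();
    // Fetch every block of the file without updating the access time and
    // without issuing block access tokens, as the RAID code above does.
    return NameNodeRaidUtil.getBlockLocations(namesystem, file, 0L, len,
        false, false).getLocatedBlocks();
  }
}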
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.IOException;
 import java.util.Collection;
@@ -40,9 +40,10 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyRaid.CachedFullPathNames;
-import org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyRaid.CachedLocatedBlocks;
-import org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyRaid.FileType;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRaid.CachedFullPathNames;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRaid.CachedLocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRaid.FileType;
+import org.apache.hadoop.hdfs.server.namenode.*;
 import org.apache.hadoop.raid.RaidNode;
 import org.junit.Test;
 
@@ -233,12 +234,12 @@ public void testCachedPathNames() throws IOException {
         new CachedFullPathNames(namesystem);
     FSInodeInfo inode1 = null;
     FSInodeInfo inode2 = null;
-    namesystem.dir.readLock();
+    NameNodeRaidTestUtil.readLock(namesystem.dir);
     try {
-      inode1 = namesystem.dir.rootDir.getNode(file1, true);
-      inode2 = namesystem.dir.rootDir.getNode(file2, true);
+      inode1 = NameNodeRaidTestUtil.getNode(namesystem.dir, file1, true);
+      inode2 = NameNodeRaidTestUtil.getNode(namesystem.dir, file2, true);
     } finally {
-      namesystem.dir.readUnlock();
+      NameNodeRaidTestUtil.readUnLock(namesystem.dir);
     }
     verifyCachedFullPathNameResult(cachedFullPathNames, inode1);
     verifyCachedFullPathNameResult(cachedFullPathNames, inode1);
@@ -335,7 +336,7 @@ public void testDeleteReplica() throws IOException {
     setBlockPlacementPolicy(namesystem, new BlockPlacementPolicyDefault(
         conf, namesystem, namesystem.clusterMap));
     DatanodeDescriptor datanode1 =
-        namesystem.datanodeMap.values().iterator().next();
+        NameNodeRaidTestUtil.getDatanodeMap(namesystem).values().iterator().next();
     String source = "/dir/file";
     String parity = xorPrefix + source;
 
@@ -346,7 +347,7 @@ public void testDeleteReplica() throws IOException {
     // start one more datanode
     cluster.startDataNodes(conf, 1, true, null, rack2, host2, null);
     DatanodeDescriptor datanode2 = null;
-    for (DatanodeDescriptor d : namesystem.datanodeMap.values()) {
+    for (DatanodeDescriptor d : NameNodeRaidTestUtil.getDatanodeMap(namesystem).values()) {
       if (!d.getName().equals(datanode1.getName())) {
         datanode2 = d;
       }
@@ -483,8 +484,8 @@ private void verifyCachedFullPathNameResult(
 
   private void verifyCachedBlocksResult(CachedLocatedBlocks cachedBlocks,
       FSNamesystem namesystem, String file) throws IOException{
-    long len = namesystem.getFileInfo(file, true).getLen();
-    List<LocatedBlock> res1 = namesystem.getBlockLocations(
+    long len = NameNodeRaidUtil.getFileInfo(namesystem, file, true).getLen();
+    List<LocatedBlock> res1 = NameNodeRaidUtil.getBlockLocations(namesystem,
         file, 0L, len, false, false).getLocatedBlocks();
     List<LocatedBlock> res2 = cachedBlocks.get(file);
     for (int i = 0; i < res1.size(); i++) {
@@ -506,8 +507,8 @@ private Collection<LocatedBlock> getCompanionBlocks(
 
   private List<LocatedBlock> getBlocks(FSNamesystem namesystem, String file)
       throws IOException {
-    long len = namesystem.getFileInfo(file, true).getLen();
-    return namesystem.getBlockLocations(
+    long len = NameNodeRaidUtil.getFileInfo(namesystem, file, true).getLen();
+    return NameNodeRaidUtil.getBlockLocations(namesystem,
        file, 0, len, false, false).getLocatedBlocks();
   }
 }
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.*;
+import java.util.*;
+
+import org.apache.hadoop.classification.*;
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.hdfs.protocol.*;
+import org.apache.hadoop.hdfs.server.blockmanagement.*;
+import org.apache.hadoop.security.AccessControlException;
+
+public class NameNodeRaidTestUtil {
+  public static void readLock(final FSDirectory dir) {
+    dir.readLock();
+  }
+
+  public static void readUnLock(final FSDirectory dir) {
+    dir.readUnlock();
+  }
+
+  public static FSInodeInfo getNode(final FSDirectory dir,
+      final String src, final boolean resolveLink
+      ) throws UnresolvedLinkException {
+    return dir.rootDir.getNode(src, resolveLink);
+  }
+
+  public static NavigableMap<String, DatanodeDescriptor> getDatanodeMap(
+      final FSNamesystem namesystem) {
+    return namesystem.datanodeMap;
+  }
+}
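NameNodeRaidTestUtil plays the same role for the tests, wrapping FSDirectory locking, INode lookup, and datanode-map access behind a namenode-package shim. A minimal sketch of the lock-then-lookup pattern used by the updated testCachedPathNames(..) follows; the class RaidTestUtilExample, its method, and the path argument are hypothetical, not part of the commit.

// Hypothetical illustration: resolving an INode under the directory read
// lock, mirroring the testCachedPathNames(..) change above.
package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.fs.UnresolvedLinkException;

class RaidTestUtilExample {
  static FSInodeInfo lockedLookup(final FSNamesystem namesystem,
      final String path) throws UnresolvedLinkException {
    NameNodeRaidTestUtil.readLock(namesystem.dir);
    try {
      // getNode(..) walks the INode tree; true resolves symlinks.
      return NameNodeRaidTestUtil.getNode(namesystem.dir, path, true);
    } finally {
      // Always release the read lock, even if the lookup throws.
      NameNodeRaidTestUtil.readUnLock(namesystem.dir);
    }
  }
}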