Merge trunk into HDFS-1623 branch
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1204794 13f79535-47bb-0310-9956-ffa450edef68
commit 6a0671977b
@ -60,6 +60,8 @@ Trunk (unreleased changes)
|
||||
HADOOP-7688. Add servlet handler check in HttpServer.start().
|
||||
(Uma Maheswara Rao G via szetszwo)
|
||||
|
||||
HADOOP-7590. Mavenize streaming and MR examples. (tucu)
|
||||
|
||||
BUGS
|
||||
|
||||
HADOOP-7606. Upgrade Jackson to version 1.7.1 to match the version required
|
||||
@ -93,6 +95,9 @@ Trunk (unreleased changes)
|
||||
HADOOP-7770. ViewFS getFileChecksum throws FileNotFoundException for files in
|
||||
/tmp and /user. (Ravi Prakash via jitendra)
|
||||
|
||||
HADOOP-7833. Fix findbugs warnings in protobuf generated code.
|
||||
(John Lee via suresh)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
HADOOP-7761. Improve the performance of raw comparisons. (todd)
|
||||
@ -123,6 +128,9 @@ Release 0.23.1 - Unreleased
|
||||
HADOOP-7787. Make source tarball use conventional name.
|
||||
(Bruno Mahé via tomwhite)
|
||||
|
||||
HADOOP-6614. RunJar should provide more diags when it can't create
|
||||
a temp file. (Jonathan Hsieh via eli)
|
||||
|
||||
Release 0.23.0 - 2011-11-01
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
@ -1356,6 +1364,8 @@ Release 0.22.0 - Unreleased
|
||||
HADOOP-7457. Remove out-of-date Chinese language documentation.
|
||||
(Jakob Homan via eli)
|
||||
|
||||
HADOOP-7783. Add more symlink tests that cover intermediate links. (eli)
|
||||
|
||||
Release 0.21.1 - Unreleased
|
||||
|
||||
IMPROVEMENTS
|
||||
|
@ -272,6 +272,6 @@
|
||||
</Match>
|
||||
<Match>
|
||||
<!-- protobuf generated code -->
|
||||
<Class name="org.apache.hadoop.ipc.protobuf.HadoopRpcProtos"/>
|
||||
<Class name="~org\.apache\.hadoop\.ipc\.protobuf\.HadoopRpcProtos.*"/>
|
||||
</Match>
|
||||
</FindBugsFilter>
|
||||
|
@ -1092,28 +1092,27 @@ public FileStatus next(final AbstractFileSystem fs, final Path p)
|
||||
* Return a fully qualified version of the given symlink target if it
|
||||
* has no scheme and authority. Partially and fully qualified paths
|
||||
* are returned unmodified.
|
||||
* @param linkFS The AbstractFileSystem of link
|
||||
* @param link The path of the symlink
|
||||
* @param target The symlink's target
|
||||
* @param pathFS The AbstractFileSystem of the path
|
||||
* @param pathWithLink Path that contains the symlink
|
||||
* @param target The symlink's absolute target
|
||||
* @return Fully qualified version of the target.
|
||||
*/
|
||||
private Path qualifySymlinkTarget(final AbstractFileSystem linkFS,
|
||||
Path link, Path target) {
|
||||
/* NB: makeQualified uses link's scheme/authority, if specified,
|
||||
* and the scheme/authority of linkFS, if not. If link does have
|
||||
* a scheme and authority they should match those of linkFS since
|
||||
* resolve updates the path and file system of a path that contains
|
||||
* links each time a link is encountered.
|
||||
private Path qualifySymlinkTarget(final AbstractFileSystem pathFS,
|
||||
Path pathWithLink, Path target) {
|
||||
/* NB: makeQualified uses the target's scheme and authority, if
|
||||
* specified, and the scheme and authority of pathFS, if not. If
|
||||
* the path does have a scheme and authority we assert they match
|
||||
* those of pathFS since resolve updates the file system of a path
|
||||
* that contains links each time a link is encountered.
|
||||
*/
|
||||
final String linkScheme = link.toUri().getScheme();
|
||||
final String linkAuth = link.toUri().getAuthority();
|
||||
if (linkScheme != null && linkAuth != null) {
|
||||
assert linkScheme.equals(linkFS.getUri().getScheme());
|
||||
assert linkAuth.equals(linkFS.getUri().getAuthority());
|
||||
final String scheme = target.toUri().getScheme();
|
||||
final String auth = target.toUri().getAuthority();
|
||||
if (scheme != null && auth != null) {
|
||||
assert scheme.equals(pathFS.getUri().getScheme());
|
||||
assert auth.equals(pathFS.getUri().getAuthority());
|
||||
}
|
||||
final boolean justPath = (target.toUri().getScheme() == null &&
|
||||
target.toUri().getAuthority() == null);
|
||||
return justPath ? target.makeQualified(linkFS.getUri(), link.getParent())
|
||||
return (scheme == null && auth == null)
|
||||
? target.makeQualified(pathFS.getUri(), pathWithLink.getParent())
|
||||
: target;
|
||||
}
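The rewritten qualifySymlinkTarget above only qualifies a target that carries neither a scheme nor an authority, filling them in from pathFS and resolving the target against the directory that contains the link. A rough, hypothetical illustration of that rule using Path.makeQualified (the class name, URI and paths below are made up, not taken from this commit):

    import java.net.URI;
    import org.apache.hadoop.fs.Path;

    public class QualifyTargetSketch {
      public static void main(String[] args) {
        URI fsUri = URI.create("hdfs://nn.example.com:8020");
        Path linkParent = new Path("/user/alice");        // parent of the symlink

        // Bare target: no scheme/authority, so it is qualified with the
        // file system URI and resolved relative to the link's parent.
        Path bare = new Path("data/file");
        System.out.println(bare.makeQualified(fsUri, linkParent));
        // expected: hdfs://nn.example.com:8020/user/alice/data/file

        // Fully qualified target: qualifySymlinkTarget returns it unmodified.
        Path qualified = new Path("hdfs://nn.example.com:8020/tmp/file");
        System.out.println(qualified);
      }
    }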
|
||||
|
||||
@ -1148,16 +1147,19 @@ public FileStatus next(final AbstractFileSystem fs, final Path p)
|
||||
}
|
||||
|
||||
/**
* Returns the un-interpreted target of the given symbolic link.
* Transparently resolves all links up to the final path component.
* @param f
* Returns the target of the given symbolic link as it was specified
* when the link was created. Links in the path leading up to the
* final path component are resolved transparently.
*
* @param f the path to return the target of
* @return The un-interpreted target of the symbolic link.
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If path <code>f</code> does not exist
* @throws UnsupportedFileSystemException If file system for <code>f</code> is
* not supported
* @throws IOException If an I/O error occurred
* @throws IOException If the given path does not refer to a symlink
* or an I/O error occurred
*/
public Path getLinkTarget(final Path f) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
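For context, a minimal usage sketch of the API documented above, modeled on the symlink tests later in this diff (the paths and FileContext instance are placeholders):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    public class GetLinkTargetSketch {
      public static void main(String[] args) throws IOException {
        FileContext fc = FileContext.getFileContext();
        Path link = new Path("/test/linkToFile");

        // Create a link whose target is the relative path "file".
        fc.createSymlink(new Path("file"), link, false);

        // getLinkTarget returns the target exactly as it was given to
        // createSymlink ("file"), not a resolved or qualified path.
        Path target = fc.getLinkTarget(link);
        System.out.println(target);
      }
    }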
@ -1277,7 +1279,7 @@ public FsStatus next(final AbstractFileSystem fs, final Path p)
|
||||
* getFsStatus, getFileStatus, exists, and listStatus.
|
||||
*
|
||||
* Symlink targets are stored as given to createSymlink, assuming the
|
||||
* underlying file system is capable of storign a fully qualified URI.
|
||||
* underlying file system is capable of storing a fully qualified URI.
|
||||
* Dangling symlinks are permitted. FileContext supports four types of
|
||||
* symlink targets, and resolves them as follows
|
||||
* <pre>
|
||||
|
@ -68,13 +68,14 @@ public Path(Path parent, Path child) {
|
||||
// Add a slash to parent's path so resolution is compatible with URI's
|
||||
URI parentUri = parent.uri;
|
||||
String parentPath = parentUri.getPath();
|
||||
if (!(parentPath.equals("/") || parentPath.equals("")))
|
||||
if (!(parentPath.equals("/") || parentPath.equals(""))) {
|
||||
try {
|
||||
parentUri = new URI(parentUri.getScheme(), parentUri.getAuthority(),
|
||||
parentUri.getPath()+"/", null, parentUri.getFragment());
|
||||
} catch (URISyntaxException e) {
|
||||
throw new IllegalArgumentException(e);
|
||||
}
|
||||
}
|
||||
URI resolved = parentUri.resolve(child.uri);
|
||||
initialize(resolved.getScheme(), resolved.getAuthority(),
|
||||
resolved.getPath(), resolved.getFragment());
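The added braces above do not change behavior; resolution still delegates to URI.resolve(), so a child that carries its own scheme and authority simply replaces the parent. This mirrors testChildParentResolution added further down in this commit:

    import org.apache.hadoop.fs.Path;

    public class ChildParentResolutionSketch {
      public static void main(String[] args) {
        Path parent = new Path("foo1://bar1/baz1");
        Path child = new Path("foo2://bar2/baz2");
        // The child's scheme and authority win, so the result equals the child.
        System.out.println(new Path(parent, child).equals(child)); // true
      }
    }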
|
||||
@ -213,7 +214,8 @@ public boolean isUriPathAbsolute() {
|
||||
* There is some ambiguity here. An absolute path is a slash
|
||||
* relative name without a scheme or an authority.
|
||||
* So either this method was incorrectly named or its
|
||||
* implementation is incorrect.
|
||||
* implementation is incorrect. This method returns true
|
||||
* even if there is a scheme and authority.
|
||||
*/
|
||||
public boolean isAbsolute() {
|
||||
return isUriPathAbsolute();
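A short illustration of the caveat documented above; the values shown are inferred from the described behavior rather than from test output in this commit:

    import org.apache.hadoop.fs.Path;

    public class IsAbsoluteSketch {
      public static void main(String[] args) {
        Path qualified = new Path("hdfs://namenode:8020/user/alice");
        System.out.println(qualified.isUriPathAbsolute()); // true: URI path starts with "/"
        System.out.println(qualified.isAbsolute());        // also true, despite scheme and authority
        System.out.println(new Path("relative/path").isAbsolute()); // false
      }
    }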
|
||||
@ -307,19 +309,16 @@ public int depth() {
|
||||
return depth;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Returns a qualified path object.
|
||||
*
|
||||
* Deprecated - use {@link #makeQualified(URI, Path)}
|
||||
*/
|
||||
|
||||
@Deprecated
|
||||
public Path makeQualified(FileSystem fs) {
|
||||
return makeQualified(fs.getUri(), fs.getWorkingDirectory());
|
||||
}
|
||||
|
||||
|
||||
/** Returns a qualified path object. */
|
||||
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
|
||||
public Path makeQualified(URI defaultUri, Path workingDir ) {
|
||||
|
@ -149,7 +149,18 @@ public static void main(String[] args) throws Throwable {
|
||||
File tmpDir = new File(new Configuration().get("hadoop.tmp.dir"));
|
||||
ensureDirectory(tmpDir);
|
||||
|
||||
final File workDir = File.createTempFile("hadoop-unjar", "", tmpDir);
|
||||
final File workDir;
|
||||
try {
|
||||
workDir = File.createTempFile("hadoop-unjar", "", tmpDir);
|
||||
} catch (IOException ioe) {
|
||||
// If user has insufficient perms to write to tmpDir, default
|
||||
// "Permission denied" message doesn't specify a filename.
|
||||
System.err.println("Error creating temp dir in hadoop.tmp.dir "
|
||||
+ tmpDir + " due to " + ioe.getMessage());
|
||||
System.exit(-1);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!workDir.delete()) {
|
||||
System.err.println("Delete failed for " + workDir);
|
||||
System.exit(-1);
|
||||
|
@ -28,8 +28,9 @@
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
import static org.apache.hadoop.fs.FileContextTestHelper.*;
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
import static org.junit.Assume.assumeTrue;
|
||||
import org.junit.Test;
|
||||
import org.junit.Before;
|
||||
import org.junit.After;
|
||||
@ -238,6 +239,31 @@ public void testStatLinkToFile() throws IOException {
|
||||
assertFalse(isDir(fc, linkToFile));
|
||||
assertEquals(file.toUri().getPath(),
|
||||
fc.getLinkTarget(linkToFile).toString());
|
||||
// The local file system does not fully resolve the link
|
||||
// when obtaining the file status
|
||||
if (!"file".equals(getScheme())) {
|
||||
assertEquals(fc.getFileStatus(file), fc.getFileStatus(linkToFile));
|
||||
assertEquals(fc.makeQualified(file),
|
||||
fc.getFileStatus(linkToFile).getPath());
|
||||
assertEquals(fc.makeQualified(linkToFile),
|
||||
fc.getFileLinkStatus(linkToFile).getPath());
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
/** Stat a relative link to a file */
|
||||
public void testStatRelLinkToFile() throws IOException {
|
||||
assumeTrue(!"file".equals(getScheme()));
|
||||
Path baseDir = new Path(testBaseDir1());
|
||||
Path file = new Path(testBaseDir1(), "file");
|
||||
Path linkToFile = new Path(testBaseDir1(), "linkToFile");
|
||||
createAndWriteFile(file);
|
||||
fc.createSymlink(new Path("file"), linkToFile, false);
|
||||
assertEquals(fc.getFileStatus(file), fc.getFileStatus(linkToFile));
|
||||
assertEquals(fc.makeQualified(file),
|
||||
fc.getFileStatus(linkToFile).getPath());
|
||||
assertEquals(fc.makeQualified(linkToFile),
|
||||
fc.getFileLinkStatus(linkToFile).getPath());
|
||||
}
|
||||
|
||||
@Test
|
||||
@ -474,16 +500,13 @@ public void testCreateLinkUsingFullyQualPaths() throws IOException {
|
||||
* creating using a partially qualified path is file system specific.
|
||||
*/
|
||||
public void testCreateLinkUsingPartQualPath1() throws IOException {
|
||||
// Partially qualified paths are covered for local file systems
|
||||
// in the previous test.
|
||||
assumeTrue(!"file".equals(getScheme()));
|
||||
Path schemeAuth = new Path(testURI().toString());
|
||||
Path fileWoHost = new Path(getScheme()+"://"+testBaseDir1()+"/file");
|
||||
Path link = new Path(testBaseDir1()+"/linkToFile");
|
||||
Path linkQual = new Path(schemeAuth, testBaseDir1()+"/linkToFile");
|
||||
|
||||
// Partially qualified paths are covered for local file systems
|
||||
// in the previous test.
|
||||
if ("file".equals(getScheme())) {
|
||||
return;
|
||||
}
|
||||
FileContext localFc = FileContext.getLocalFSFileContext();
|
||||
|
||||
fc.createSymlink(fileWoHost, link, false);
|
||||
@ -748,7 +771,7 @@ public void testCreateLinkToDotDot() throws IOException {
|
||||
}
|
||||
|
||||
@Test
|
||||
/** Test create symlink to ../foo */
|
||||
/** Test create symlink to ../file */
|
||||
public void testCreateLinkToDotDotPrefix() throws IOException {
|
||||
Path file = new Path(testBaseDir1(), "file");
|
||||
Path dir = new Path(testBaseDir1(), "test");
|
||||
@ -1205,24 +1228,30 @@ public void testRenameFileWithDestParentSymlink() throws IOException {
|
||||
}
|
||||
|
||||
@Test
|
||||
/** Operate on a file using a path with an intermediate symlink */
|
||||
public void testAccessFileViaSymlink() throws IOException {
|
||||
/**
|
||||
* Create, write, read, append, rename, get the block locations,
|
||||
* checksums, and delete a file using a path with a symlink as an
|
||||
* intermediate path component where the link target was specified
|
||||
* using an absolute path. Rename is covered in more depth below.
|
||||
*/
|
||||
public void testAccessFileViaInterSymlinkAbsTarget() throws IOException {
|
||||
Path baseDir = new Path(testBaseDir1());
|
||||
Path file = new Path(testBaseDir1(), "file");
|
||||
Path fileNew = new Path(baseDir, "fileNew");
|
||||
Path linkToDir = new Path(testBaseDir2(), "linkToDir");
|
||||
Path fileViaLink = new Path(linkToDir, "file");
|
||||
Path fileNewViaLink = new Path(linkToDir, "fileNew");
|
||||
fc.createSymlink(baseDir, linkToDir, false);
|
||||
// Create, write, read, append, rename, get block locations and
|
||||
// checksums, and delete a file using a path that contains a
|
||||
// symlink as an intermediate path component. Rename is covered
|
||||
// in more depth below.
|
||||
createAndWriteFile(fileViaLink);
|
||||
assertTrue(exists(fc, fileViaLink));
|
||||
assertTrue(isFile(fc, fileViaLink));
|
||||
assertFalse(isDir(fc, fileViaLink));
|
||||
assertFalse(fc.getFileLinkStatus(fileViaLink).isSymlink());
|
||||
assertFalse(isDir(fc, fileViaLink));
|
||||
assertEquals(fc.getFileStatus(file),
|
||||
fc.getFileLinkStatus(file));
|
||||
assertEquals(fc.getFileStatus(fileViaLink),
|
||||
fc.getFileLinkStatus(fileViaLink));
|
||||
readFile(fileViaLink);
|
||||
appendToFile(fileViaLink);
|
||||
fc.rename(fileViaLink, fileNewViaLink);
|
||||
@ -1237,6 +1266,58 @@ public void testAccessFileViaSymlink() throws IOException {
|
||||
assertFalse(exists(fc, fileNewViaLink));
|
||||
}
|
||||
|
||||
@Test
|
||||
/**
|
||||
* Operate on a file using a path with an intermediate symlink where
|
||||
* the link target was specified as a fully qualified path.
|
||||
*/
|
||||
public void testAccessFileViaInterSymlinkQualTarget() throws IOException {
|
||||
Path baseDir = new Path(testBaseDir1());
|
||||
Path file = new Path(testBaseDir1(), "file");
|
||||
Path fileNew = new Path(baseDir, "fileNew");
|
||||
Path linkToDir = new Path(testBaseDir2(), "linkToDir");
|
||||
Path fileViaLink = new Path(linkToDir, "file");
|
||||
Path fileNewViaLink = new Path(linkToDir, "fileNew");
|
||||
fc.createSymlink(fc.makeQualified(baseDir), linkToDir, false);
|
||||
createAndWriteFile(fileViaLink);
|
||||
assertEquals(fc.getFileStatus(file),
|
||||
fc.getFileLinkStatus(file));
|
||||
assertEquals(fc.getFileStatus(fileViaLink),
|
||||
fc.getFileLinkStatus(fileViaLink));
|
||||
readFile(fileViaLink);
|
||||
}
|
||||
|
||||
@Test
|
||||
/**
|
||||
* Operate on a file using a path with an intermediate symlink where
|
||||
* the link target was specified as a relative path.
|
||||
*/
|
||||
public void testAccessFileViaInterSymlinkRelTarget() throws IOException {
|
||||
assumeTrue(!"file".equals(getScheme()));
|
||||
Path baseDir = new Path(testBaseDir1());
|
||||
Path dir = new Path(testBaseDir1(), "dir");
|
||||
Path file = new Path(dir, "file");
|
||||
Path linkToDir = new Path(testBaseDir1(), "linkToDir");
|
||||
Path fileViaLink = new Path(linkToDir, "file");
|
||||
|
||||
fc.mkdir(dir, FileContext.DEFAULT_PERM, false);
|
||||
fc.createSymlink(new Path("dir"), linkToDir, false);
|
||||
createAndWriteFile(fileViaLink);
|
||||
// Note that getFileStatus returns fully qualified paths even
|
||||
// when called on an absolute path.
|
||||
assertEquals(fc.makeQualified(file),
|
||||
fc.getFileStatus(file).getPath());
|
||||
// In each case getFileLinkStatus returns the same FileStatus
|
||||
// as getFileStatus since we're not calling it on a link and
|
||||
// FileStatus objects are compared by Path.
|
||||
assertEquals(fc.getFileStatus(file),
|
||||
fc.getFileLinkStatus(file));
|
||||
assertEquals(fc.getFileStatus(fileViaLink),
|
||||
fc.getFileLinkStatus(fileViaLink));
|
||||
assertEquals(fc.getFileStatus(fileViaLink),
|
||||
fc.getFileLinkStatus(file));
|
||||
}
|
||||
|
||||
@Test
|
||||
/** Test create, list, and delete a directory through a symlink */
|
||||
public void testAccessDirViaSymlink() throws IOException {
|
||||
|
@ -95,6 +95,7 @@ public void testParent() {
|
||||
assertEquals(new Path("/foo"), new Path("/foo/bar").getParent());
|
||||
assertEquals(new Path("foo"), new Path("foo/bar").getParent());
|
||||
assertEquals(new Path("/"), new Path("/foo").getParent());
|
||||
assertEquals(null, new Path("/").getParent());
|
||||
if (Path.WINDOWS) {
|
||||
assertEquals(new Path("c:/"), new Path("c:/foo").getParent());
|
||||
}
|
||||
@ -160,6 +161,13 @@ public void testDots() {
|
||||
assertEquals(new Path("foo/bar/baz","../../../../..").toString(), "../..");
|
||||
}
|
||||
|
||||
/** Test Path objects created from other Path objects */
|
||||
public void testChildParentResolution() throws URISyntaxException, IOException {
|
||||
Path parent = new Path("foo1://bar1/baz1");
|
||||
Path child = new Path("foo2://bar2/baz2");
|
||||
assertEquals(child, new Path(parent, child));
|
||||
}
|
||||
|
||||
public void testScheme() throws java.io.IOException {
|
||||
assertEquals("foo:/bar", new Path("foo:/","/bar").toString());
|
||||
assertEquals("foo://bar/baz", new Path("foo://bar/","/baz").toString());
|
||||
|
@ -5,6 +5,12 @@ Trunk (unreleased changes)
|
||||
HDFS-395. DFS Scalability: Incremental block reports. (Tomasz Nykiel
|
||||
via hairong)
|
||||
|
||||
HDFS-2517. Add protobuf service for JournalProtocol. (suresh)
|
||||
|
||||
HDFS-2518. Add protobuf service for NamenodeProtocol. (suresh)
|
||||
|
||||
HDFS-2520. Add protobuf service for InterDatanodeProtocol. (suresh)
|
||||
|
||||
IMPROVEMENTS
|
||||
|
||||
HADOOP-7524 Change RPC to allow multiple protocols including multiple
|
||||
@ -52,6 +58,13 @@ Trunk (unreleased changes)
|
||||
|
||||
HDFS-2334. Add Closeable to JournalManager. (Ivan Kelly via jitendra)
|
||||
|
||||
HDFS-2564. Cleanup unnecessary exceptions thrown and unnecessary casts.
|
||||
(Hari Mankude via eli)
|
||||
|
||||
HDFS-2572. Remove unnecessary double-check in DN#getHostName. (harsh)
|
||||
|
||||
HDFS-2410. Further cleanup of hardcoded configuration keys and values.
|
||||
(suresh)
|
||||
|
||||
OPTIMIZATIONS
|
||||
HDFS-2477. Optimize computing the diff between a block report and the
|
||||
@ -100,6 +113,9 @@ Trunk (unreleased changes)
|
||||
HDFS-2526. (Client)NamenodeProtocolTranslatorR23 do not need to keep a
|
||||
reference to rpcProxyWithoutRetry (atm)
|
||||
|
||||
HDFS-2532. TestDfsOverAvroRpc timing out in trunk (Uma Maheswara Rao G
|
||||
via todd)
|
||||
|
||||
Release 0.23.1 - UNRELEASED
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
@ -117,6 +133,24 @@ Release 0.23.1 - UNRELEASED
|
||||
HDFS-2562. Refactor DN configuration variables out of DataNode class
|
||||
(todd)
|
||||
|
||||
HDFS-2563. Some cleanup in BPOfferService. (todd)
|
||||
|
||||
HDFS-208. name node should warn if only one dir is listed in dfs.name.dir.
|
||||
(Uma Maheswara Rao G via eli)
|
||||
|
||||
HDFS-2568. Use a set to manage child sockets in XceiverServer.
|
||||
(harsh via eli)
|
||||
|
||||
HDFS-2454. Move maxXceiverCount check to before starting the
|
||||
thread in dataXceiver. (harsh via eli)
|
||||
|
||||
HDFS-2570. Add descriptions for dfs.*.https.address in hdfs-default.xml.
|
||||
(eli)
|
||||
|
||||
HDFS-2536. Remove unused imports. (harsh via eli)
|
||||
|
||||
HDFS-2566. Move BPOfferService to be a non-inner class. (todd)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
HDFS-2130. Switch default checksum to CRC32C. (todd)
|
||||
@ -127,8 +161,23 @@ Release 0.23.1 - UNRELEASED
|
||||
HDFS-2129. Simplify BlockReader to not inherit from FSInputChecker.
|
||||
(todd)
|
||||
|
||||
HDFS-2246. Enable reading a block directly from local file system
|
||||
for a client on the same node as the block file. (Andrew Purtell,
|
||||
Suresh Srinivas and Jitendra Nath Pandey via szetszwo)
|
||||
|
||||
BUG FIXES
|
||||
|
||||
HDFS-2541. For a sufficiently large value of blocks, the DN Scanner
|
||||
may request a random number with a negative seed value. (harsh via eli)
|
||||
|
||||
HDFS-2502. hdfs-default.xml should include dfs.name.dir.restore.
|
||||
(harsh via eli)
|
||||
|
||||
HDFS-2567. When 0 DNs are available, show a proper error when
|
||||
trying to browse DFS via web UI. (harsh via eli)
|
||||
|
||||
HDFS-2575. DFSTestUtil may create empty files (todd)
|
||||
|
||||
Release 0.23.0 - 2011-11-01
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
@ -1902,6 +1951,11 @@ Release 0.22.0 - Unreleased
|
||||
HDFS-2002. Incorrect computation of needed blocks in getTurnOffTip().
|
||||
(Plamen Jeliazkov via shv)
|
||||
|
||||
HDFS-2514. Link resolution bug for intermediate symlinks with
|
||||
relative targets. (eli)
|
||||
|
||||
HDFS-2573. TestFiDataXceiverServer is failing, not testing OOME (cos)
|
||||
|
||||
Release 0.21.1 - Unreleased
|
||||
|
||||
HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)
|
||||
|
@ -0,0 +1,380 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs;
|
||||
|
||||
import java.io.DataInputStream;
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
|
||||
import org.apache.hadoop.hdfs.server.datanode.FSDataset;
|
||||
import org.apache.hadoop.ipc.RPC;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
import org.apache.hadoop.util.DataChecksum;
|
||||
|
||||
/**
* BlockReaderLocal enables local short circuited reads. If the DFS client is on
* the same machine as the datanode, then the client can read files directly
* from the local file system rather than going through the datanode for better
* performance. <br>
* {@link BlockReaderLocal} works as follows:
* <ul>
* <li>The client performing short circuit reads must be configured at the
* datanode.</li>
* <li>The client gets the path to the file where block is stored using
* {@link org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol#getBlockLocalPathInfo(ExtendedBlock, Token)}
* RPC call</li>
* <li>Client uses kerberos authentication to connect to the datanode over RPC,
* if security is enabled.</li>
* </ul>
*/
class BlockReaderLocal extends RemoteBlockReader2 {
public static final Log LOG = LogFactory.getLog(DFSClient.class);
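The javadoc above notes that short circuit reads must be enabled in configuration. A hedged sketch using the keys this commit adds to DFSConfigKeys; the user name below is purely illustrative:

    import org.apache.hadoop.conf.Configuration;

    public class ShortCircuitConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Client side: read block files directly when the datanode is local.
        conf.setBoolean("dfs.client.read.shortcircuit", true);
        // Client side: optionally skip checksum verification on the local path.
        conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", false);
        // Datanode side: the user allowed to request local block paths
        // (dfs.block.local-path-access.user, added in this commit).
        conf.set("dfs.block.local-path-access.user", "hbase");
      }
    }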
|
||||
|
||||
//Stores the cache and proxy for a local datanode.
|
||||
private static class LocalDatanodeInfo {
|
||||
private ClientDatanodeProtocol proxy = null;
|
||||
private final Map<ExtendedBlock, BlockLocalPathInfo> cache;
|
||||
|
||||
LocalDatanodeInfo() {
|
||||
final int cacheSize = 10000;
|
||||
final float hashTableLoadFactor = 0.75f;
|
||||
int hashTableCapacity = (int) Math.ceil(cacheSize / hashTableLoadFactor) + 1;
|
||||
cache = Collections
|
||||
.synchronizedMap(new LinkedHashMap<ExtendedBlock, BlockLocalPathInfo>(
|
||||
hashTableCapacity, hashTableLoadFactor, true) {
|
||||
private static final long serialVersionUID = 1;
|
||||
|
||||
@Override
|
||||
protected boolean removeEldestEntry(
|
||||
Map.Entry<ExtendedBlock, BlockLocalPathInfo> eldest) {
|
||||
return size() > cacheSize;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private synchronized ClientDatanodeProtocol getDatanodeProxy(
|
||||
DatanodeInfo node, Configuration conf, int socketTimeout)
|
||||
throws IOException {
|
||||
if (proxy == null) {
|
||||
proxy = DFSUtil.createClientDatanodeProtocolProxy(node, conf,
|
||||
socketTimeout);
|
||||
}
|
||||
return proxy;
|
||||
}
|
||||
|
||||
private synchronized void resetDatanodeProxy() {
|
||||
if (null != proxy) {
|
||||
RPC.stopProxy(proxy);
|
||||
proxy = null;
|
||||
}
|
||||
}
|
||||
|
||||
private BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b) {
|
||||
return cache.get(b);
|
||||
}
|
||||
|
||||
private void setBlockLocalPathInfo(ExtendedBlock b, BlockLocalPathInfo info) {
|
||||
cache.put(b, info);
|
||||
}
|
||||
|
||||
private void removeBlockLocalPathInfo(ExtendedBlock b) {
|
||||
cache.remove(b);
|
||||
}
|
||||
}
|
||||
|
||||
// Multiple datanodes could be running on the local machine. Store proxies in
|
||||
// a map keyed by the ipc port of the datanode.
|
||||
private static Map<Integer, LocalDatanodeInfo> localDatanodeInfoMap = new HashMap<Integer, LocalDatanodeInfo>();
|
||||
|
||||
private final FileInputStream dataIn; // reader for the data file
|
||||
|
||||
private FileInputStream checksumIn; // reader for the checksum file
|
||||
|
||||
private int offsetFromChunkBoundary;
|
||||
|
||||
ByteBuffer dataBuff = null;
|
||||
ByteBuffer checksumBuff = null;
|
||||
|
||||
/**
|
||||
* The only way this object can be instantiated.
|
||||
*/
|
||||
static BlockReaderLocal newBlockReader(Configuration conf, String file,
|
||||
ExtendedBlock blk, Token<BlockTokenIdentifier> token, DatanodeInfo node,
|
||||
int socketTimeout, long startOffset, long length) throws IOException {
|
||||
|
||||
LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node
|
||||
.getIpcPort());
|
||||
// check the cache first
|
||||
BlockLocalPathInfo pathinfo = localDatanodeInfo.getBlockLocalPathInfo(blk);
|
||||
if (pathinfo == null) {
|
||||
pathinfo = getBlockPathInfo(blk, node, conf, socketTimeout, token);
|
||||
}
|
||||
|
||||
// check to see if the file exists. It may so happen that the
|
||||
// HDFS file has been deleted and this block-lookup is occurring
|
||||
// on behalf of a new HDFS file. This time, the block file could
|
||||
// be residing in a different portion of the fs.data.dir directory.
|
||||
// In this case, we remove this entry from the cache. The next
|
||||
// call to this method will re-populate the cache.
|
||||
FileInputStream dataIn = null;
|
||||
FileInputStream checksumIn = null;
|
||||
BlockReaderLocal localBlockReader = null;
|
||||
boolean skipChecksumCheck = skipChecksumCheck(conf);
|
||||
try {
|
||||
// get a local file system
|
||||
File blkfile = new File(pathinfo.getBlockPath());
|
||||
dataIn = new FileInputStream(blkfile);
|
||||
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("New BlockReaderLocal for file " + blkfile + " of size "
|
||||
+ blkfile.length() + " startOffset " + startOffset + " length "
|
||||
+ length + " short circuit checksum " + skipChecksumCheck);
|
||||
}
|
||||
|
||||
if (!skipChecksumCheck) {
|
||||
// get the metadata file
|
||||
File metafile = new File(pathinfo.getMetaPath());
|
||||
checksumIn = new FileInputStream(metafile);
|
||||
|
||||
// read and handle the common header here. For now just a version
|
||||
BlockMetadataHeader header = BlockMetadataHeader
|
||||
.readHeader(new DataInputStream(checksumIn));
|
||||
short version = header.getVersion();
|
||||
if (version != FSDataset.METADATA_VERSION) {
|
||||
LOG.warn("Wrong version (" + version + ") for metadata file for "
|
||||
+ blk + " ignoring ...");
|
||||
}
|
||||
DataChecksum checksum = header.getChecksum();
|
||||
long firstChunkOffset = startOffset
|
||||
- (startOffset % checksum.getBytesPerChecksum());
|
||||
localBlockReader = new BlockReaderLocal(conf, file, blk, token,
|
||||
startOffset, length, pathinfo, checksum, true, dataIn,
|
||||
firstChunkOffset, checksumIn);
|
||||
} else {
|
||||
localBlockReader = new BlockReaderLocal(conf, file, blk, token,
|
||||
startOffset, length, pathinfo, dataIn);
|
||||
}
|
||||
} catch (IOException e) {
|
||||
// remove from cache
|
||||
localDatanodeInfo.removeBlockLocalPathInfo(blk);
|
||||
DFSClient.LOG.warn("BlockReaderLocal: Removing " + blk
|
||||
+ " from cache because local file " + pathinfo.getBlockPath()
|
||||
+ " could not be opened.");
|
||||
throw e;
|
||||
} finally {
|
||||
if (localBlockReader == null) {
|
||||
if (dataIn != null) {
|
||||
dataIn.close();
|
||||
}
|
||||
if (checksumIn != null) {
|
||||
checksumIn.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
return localBlockReader;
|
||||
}
|
||||
|
||||
private static synchronized LocalDatanodeInfo getLocalDatanodeInfo(int port) {
|
||||
LocalDatanodeInfo ldInfo = localDatanodeInfoMap.get(port);
|
||||
if (ldInfo == null) {
|
||||
ldInfo = new LocalDatanodeInfo();
|
||||
localDatanodeInfoMap.put(port, ldInfo);
|
||||
}
|
||||
return ldInfo;
|
||||
}
|
||||
|
||||
private static BlockLocalPathInfo getBlockPathInfo(ExtendedBlock blk,
|
||||
DatanodeInfo node, Configuration conf, int timeout,
|
||||
Token<BlockTokenIdentifier> token) throws IOException {
|
||||
LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.ipcPort);
|
||||
BlockLocalPathInfo pathinfo = null;
|
||||
ClientDatanodeProtocol proxy = localDatanodeInfo.getDatanodeProxy(node,
|
||||
conf, timeout);
|
||||
try {
|
||||
// make RPC to local datanode to find local pathnames of blocks
|
||||
pathinfo = proxy.getBlockLocalPathInfo(blk, token);
|
||||
if (pathinfo != null) {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Cached location of block " + blk + " as " + pathinfo);
|
||||
}
|
||||
localDatanodeInfo.setBlockLocalPathInfo(blk, pathinfo);
|
||||
}
|
||||
} catch (IOException e) {
|
||||
localDatanodeInfo.resetDatanodeProxy(); // Reset proxy on error
|
||||
throw e;
|
||||
}
|
||||
return pathinfo;
|
||||
}
|
||||
|
||||
private static boolean skipChecksumCheck(Configuration conf) {
|
||||
return conf.getBoolean(
|
||||
DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,
|
||||
DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_DEFAULT);
|
||||
}
|
||||
|
||||
private BlockReaderLocal(Configuration conf, String hdfsfile,
|
||||
ExtendedBlock block, Token<BlockTokenIdentifier> token, long startOffset,
|
||||
long length, BlockLocalPathInfo pathinfo, FileInputStream dataIn)
|
||||
throws IOException {
|
||||
this(conf, hdfsfile, block, token, startOffset, length, pathinfo,
|
||||
DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_NULL, 4), false,
|
||||
dataIn, startOffset, null);
|
||||
}
|
||||
|
||||
private BlockReaderLocal(Configuration conf, String hdfsfile,
|
||||
ExtendedBlock block, Token<BlockTokenIdentifier> token, long startOffset,
|
||||
long length, BlockLocalPathInfo pathinfo, DataChecksum checksum,
|
||||
boolean verifyChecksum, FileInputStream dataIn, long firstChunkOffset,
|
||||
FileInputStream checksumIn) throws IOException {
|
||||
super(hdfsfile, block.getBlockPoolId(), block.getBlockId(), dataIn
|
||||
.getChannel(), checksum, verifyChecksum, startOffset, firstChunkOffset,
|
||||
length, null);
|
||||
this.dataIn = dataIn;
|
||||
this.checksumIn = checksumIn;
|
||||
this.offsetFromChunkBoundary = (int) (startOffset-firstChunkOffset);
|
||||
dataBuff = bufferPool.getBuffer(bytesPerChecksum*64);
|
||||
checksumBuff = bufferPool.getBuffer(checksumSize*64);
|
||||
//Initially the buffers have nothing to read.
|
||||
dataBuff.flip();
|
||||
checksumBuff.flip();
|
||||
long toSkip = firstChunkOffset;
|
||||
while (toSkip > 0) {
|
||||
long skipped = dataIn.skip(toSkip);
|
||||
if (skipped == 0) {
|
||||
throw new IOException("Couldn't initialize input stream");
|
||||
}
|
||||
toSkip -= skipped;
|
||||
}
|
||||
if (checksumIn != null) {
|
||||
long checkSumOffset = (firstChunkOffset / bytesPerChecksum)
|
||||
* checksumSize;
|
||||
while (checkSumOffset > 0) {
|
||||
long skipped = checksumIn.skip(checkSumOffset);
|
||||
if (skipped == 0) {
|
||||
throw new IOException("Couldn't initialize checksum input stream");
|
||||
}
|
||||
checkSumOffset -= skipped;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private int readIntoBuffer(FileInputStream stream, ByteBuffer buf)
|
||||
throws IOException {
|
||||
int bytesRead = stream.getChannel().read(buf);
|
||||
if (bytesRead < 0) {
|
||||
//EOF
|
||||
return bytesRead;
|
||||
}
|
||||
while (buf.remaining() > 0) {
|
||||
int n = stream.getChannel().read(buf);
|
||||
if (n < 0) {
|
||||
//EOF
|
||||
return bytesRead;
|
||||
}
|
||||
bytesRead += n;
|
||||
}
|
||||
return bytesRead;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized int read(byte[] buf, int off, int len) throws IOException {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.info("read off " + off + " len " + len);
|
||||
}
|
||||
if (!verifyChecksum) {
|
||||
return dataIn.read(buf, off, len);
|
||||
} else {
|
||||
int dataRead = -1;
|
||||
if (dataBuff.remaining() == 0) {
|
||||
dataBuff.clear();
|
||||
checksumBuff.clear();
|
||||
dataRead = readIntoBuffer(dataIn, dataBuff);
|
||||
readIntoBuffer(checksumIn, checksumBuff);
|
||||
checksumBuff.flip();
|
||||
dataBuff.flip();
|
||||
if (verifyChecksum) {
|
||||
checksum.verifyChunkedSums(dataBuff, checksumBuff, filename,
|
||||
this.startOffset);
|
||||
}
|
||||
} else {
|
||||
dataRead = dataBuff.remaining();
|
||||
}
|
||||
if (dataRead > 0) {
|
||||
int nRead = Math.min(dataRead - offsetFromChunkBoundary, len);
|
||||
if (offsetFromChunkBoundary > 0) {
|
||||
dataBuff.position(offsetFromChunkBoundary);
|
||||
// Its either end of file or dataRead is greater than the
|
||||
// offsetFromChunkBoundary
|
||||
offsetFromChunkBoundary = 0;
|
||||
}
|
||||
if (nRead > 0) {
|
||||
dataBuff.get(buf, off, nRead);
|
||||
return nRead;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized long skip(long n) throws IOException {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("skip " + n);
|
||||
}
|
||||
if (!verifyChecksum) {
|
||||
return dataIn.skip(n);
|
||||
} else {
|
||||
return super.skip(n);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void close() throws IOException {
|
||||
dataIn.close();
|
||||
if (checksumIn != null) {
|
||||
checksumIn.close();
|
||||
}
|
||||
if (dataBuff != null) {
|
||||
bufferPool.returnBuffer(dataBuff);
|
||||
dataBuff = null;
|
||||
}
|
||||
if (checksumBuff != null) {
|
||||
bufferPool.returnBuffer(checksumBuff);
|
||||
checksumBuff = null;
|
||||
}
|
||||
super.close();
|
||||
}
|
||||
}
|
@ -24,13 +24,20 @@
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.net.InetAddress;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.NetworkInterface;
|
||||
import java.net.Socket;
|
||||
import java.net.SocketException;
|
||||
import java.net.URI;
|
||||
import java.util.Collections;
|
||||
import java.util.EnumSet;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import javax.net.SocketFactory;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
@ -77,6 +84,8 @@
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
|
||||
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
|
||||
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
|
||||
@ -234,6 +243,8 @@ Conf getConf() {
|
||||
private final Map<String, DFSOutputStream> filesBeingWritten
|
||||
= new HashMap<String, DFSOutputStream>();
|
||||
|
||||
private boolean shortCircuitLocalReads;
|
||||
|
||||
/**
|
||||
* Same as this(NameNode.getAddress(conf), conf);
|
||||
* @see #DFSClient(InetSocketAddress, Configuration)
|
||||
@ -317,6 +328,13 @@ public DFSClient(URI nameNodeUri, Configuration conf,
|
||||
"Expecting exactly one of nameNodeUri and rpcNamenode being null: "
|
||||
+ "nameNodeUri=" + nameNodeUri + ", rpcNamenode=" + rpcNamenode);
|
||||
}
|
||||
// read directly from the block file if configured.
|
||||
this.shortCircuitLocalReads = conf.getBoolean(
|
||||
DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY,
|
||||
DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT);
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Short circuit read is " + shortCircuitLocalReads);
|
||||
}
|
||||
}
|
||||
|
||||
private Class<?> getFailoverProxyProviderClass(String authority, Configuration conf)
|
||||
@ -539,6 +557,82 @@ public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get {@link BlockReader} for short circuited local reads.
|
||||
*/
|
||||
static BlockReader getLocalBlockReader(Configuration conf,
|
||||
String src, ExtendedBlock blk, Token<BlockTokenIdentifier> accessToken,
|
||||
DatanodeInfo chosenNode, int socketTimeout, long offsetIntoBlock)
|
||||
throws InvalidToken, IOException {
|
||||
try {
|
||||
return BlockReaderLocal.newBlockReader(conf, src, blk, accessToken,
|
||||
chosenNode, socketTimeout, offsetIntoBlock, blk.getNumBytes()
|
||||
- offsetIntoBlock);
|
||||
} catch (RemoteException re) {
|
||||
throw re.unwrapRemoteException(InvalidToken.class,
|
||||
AccessControlException.class);
|
||||
}
|
||||
}
|
||||
|
||||
private static Set<String> localIpAddresses = Collections
|
||||
.synchronizedSet(new HashSet<String>());
|
||||
|
||||
private static boolean isLocalAddress(InetSocketAddress targetAddr) {
|
||||
InetAddress addr = targetAddr.getAddress();
|
||||
if (localIpAddresses.contains(addr.getHostAddress())) {
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace("Address " + targetAddr + " is local");
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check if the address is any local or loop back
|
||||
boolean local = addr.isAnyLocalAddress() || addr.isLoopbackAddress();
|
||||
|
||||
// Check if the address is defined on any interface
|
||||
if (!local) {
|
||||
try {
|
||||
local = NetworkInterface.getByInetAddress(addr) != null;
|
||||
} catch (SocketException e) {
|
||||
local = false;
|
||||
}
|
||||
}
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace("Address " + targetAddr + " is local");
|
||||
}
|
||||
if (local == true) {
|
||||
localIpAddresses.add(addr.getHostAddress());
|
||||
}
|
||||
return local;
|
||||
}
|
||||
|
||||
/**
|
||||
* Should the block access token be refetched on an exception
|
||||
*
|
||||
* @param ex Exception received
|
||||
* @param targetAddr Target datanode address from where exception was received
|
||||
* @return true if block access token has expired or invalid and it should be
|
||||
* refetched
|
||||
*/
|
||||
private static boolean tokenRefetchNeeded(IOException ex,
|
||||
InetSocketAddress targetAddr) {
|
||||
/*
|
||||
* Get a new access token and retry. Retry is needed in 2 cases. 1) When
|
||||
* both NN and DN re-started while DFSClient holding a cached access token.
|
||||
* 2) In the case that NN fails to update its access key at pre-set interval
|
||||
* (by a wide margin) and subsequently restarts. In this case, DN
|
||||
* re-registers itself with NN and receives a new access key, but DN will
|
||||
* delete the old access key from its memory since it's considered expired
|
||||
* based on the estimated expiration date.
|
||||
*/
|
||||
if (ex instanceof InvalidBlockTokenException || ex instanceof InvalidToken) {
|
||||
LOG.info("Access token was invalid when connecting to " + targetAddr
|
||||
+ " : " + ex);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Cancel a delegation token
|
||||
* @param token the token to cancel
|
||||
@ -1639,6 +1733,14 @@ public long getVisibleLength() throws IOException {
|
||||
}
|
||||
}
|
||||
|
||||
boolean shouldTryShortCircuitRead(InetSocketAddress targetAddr)
|
||||
throws IOException {
|
||||
if (shortCircuitLocalReads && isLocalAddress(targetAddr)) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void reportChecksumFailure(String file, ExtendedBlock blk, DatanodeInfo dn) {
|
||||
DatanodeInfo [] dnArr = { dn };
|
||||
LocatedBlock [] lblocks = { new LocatedBlock(blk, dnArr) };
|
||||
@ -1660,4 +1762,8 @@ public String toString() {
|
||||
return getClass().getSimpleName() + "[clientName=" + clientName
|
||||
+ ", ugi=" + ugi + "]";
|
||||
}
|
||||
|
||||
void disableShortCircuit() {
|
||||
shortCircuitLocalReads = false;
|
||||
}
|
||||
}
|
||||
|
@ -262,6 +262,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
|
||||
public static final String DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED_KEY = "dfs.corruptfilesreturned.max";
|
||||
public static final int DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED = 500;
|
||||
|
||||
public static final String DFS_CLIENT_READ_SHORTCIRCUIT_KEY = "dfs.client.read.shortcircuit";
|
||||
public static final boolean DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT = false;
|
||||
public static final String DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY = "dfs.client.read.shortcircuit.skip.checksum";
|
||||
public static final boolean DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_DEFAULT = false;
|
||||
|
||||
// property for fsimage compression
|
||||
public static final String DFS_IMAGE_COMPRESS_KEY = "dfs.image.compress";
|
||||
public static final boolean DFS_IMAGE_COMPRESS_DEFAULT = false;
|
||||
@ -302,6 +307,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
|
||||
public static final String DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY = "dfs.web.authentication.kerberos.principal";
|
||||
public static final String DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY = "dfs.web.authentication.kerberos.keytab";
|
||||
|
||||
public static final String DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY = "dfs.block.local-path-access.user";
|
||||
|
||||
// HA related configuration
|
||||
public static final String DFS_HA_NAMENODES_KEY = "dfs.ha.namenodes";
|
||||
public static final String DFS_HA_NAMENODE_ID_KEY = "dfs.ha.namenode.id";
|
||||
|
@ -46,6 +46,7 @@
|
||||
import org.apache.hadoop.ipc.RPC;
|
||||
import org.apache.hadoop.ipc.RemoteException;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.apache.hadoop.security.AccessControlException;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
|
||||
/****************************************************************
|
||||
@ -405,11 +406,8 @@ private synchronized DatanodeInfo blockSeekTo(long target) throws IOException {
|
||||
try {
|
||||
ExtendedBlock blk = targetBlock.getBlock();
|
||||
Token<BlockTokenIdentifier> accessToken = targetBlock.getBlockToken();
|
||||
|
||||
blockReader = getBlockReader(
|
||||
targetAddr, src, blk,
|
||||
accessToken,
|
||||
offsetIntoBlock, blk.getNumBytes() - offsetIntoBlock,
|
||||
blockReader = getBlockReader(targetAddr, chosenNode, src, blk,
|
||||
accessToken, offsetIntoBlock, blk.getNumBytes() - offsetIntoBlock,
|
||||
buffersize, verifyChecksum, dfsClient.clientName);
|
||||
if(connectFailedOnce) {
|
||||
DFSClient.LOG.info("Successfully connected to " + targetAddr +
|
||||
@ -543,7 +541,7 @@ public synchronized int read(byte buf[], int off, int len) throws IOException {
|
||||
if (pos > blockEnd) {
|
||||
currentNode = blockSeekTo(pos);
|
||||
}
|
||||
int realLen = (int) Math.min((long) len, (blockEnd - pos + 1L));
|
||||
int realLen = (int) Math.min(len, (blockEnd - pos + 1L));
|
||||
int result = readBuffer(buf, off, realLen, corruptedBlockMap);
|
||||
|
||||
if (result >= 0) {
|
||||
@ -666,12 +664,9 @@ private void fetchBlockByteRange(LocatedBlock block, long start, long end,
|
||||
Token<BlockTokenIdentifier> blockToken = block.getBlockToken();
|
||||
|
||||
int len = (int) (end - start + 1);
|
||||
|
||||
reader = getBlockReader(targetAddr, src,
|
||||
block.getBlock(),
|
||||
blockToken,
|
||||
start, len, buffersize,
|
||||
verifyChecksum, dfsClient.clientName);
|
||||
reader = getBlockReader(targetAddr, chosenNode, src, block.getBlock(),
|
||||
blockToken, start, len, buffersize, verifyChecksum,
|
||||
dfsClient.clientName);
|
||||
int nread = reader.readAll(buf, offset, len);
|
||||
if (nread != len) {
|
||||
throw new IOException("truncated return from reader.read(): " +
|
||||
@ -684,6 +679,10 @@ private void fetchBlockByteRange(LocatedBlock block, long start, long end,
|
||||
e.getPos() + " from " + chosenNode.getName());
|
||||
// we want to remember what we have tried
|
||||
addIntoCorruptedBlockMap(block.getBlock(), chosenNode, corruptedBlockMap);
|
||||
} catch (AccessControlException ex) {
|
||||
DFSClient.LOG.warn("Short circuit access failed ", ex);
|
||||
dfsClient.disableShortCircuit();
|
||||
continue;
|
||||
} catch (IOException e) {
|
||||
if (e instanceof InvalidBlockTokenException && refetchToken > 0) {
|
||||
DFSClient.LOG.info("Will get a new access token and retry, "
|
||||
@ -726,6 +725,7 @@ private void closeBlockReader(BlockReader reader) throws IOException {
|
||||
* Otherwise, it will create a new connection.
|
||||
*
|
||||
* @param dnAddr Address of the datanode
|
||||
* @param chosenNode Chosen datanode information
|
||||
* @param file File location
|
||||
* @param block The Block object
|
||||
* @param blockToken The access token for security
|
||||
@ -737,6 +737,7 @@ private void closeBlockReader(BlockReader reader) throws IOException {
|
||||
* @return New BlockReader instance
|
||||
*/
|
||||
protected BlockReader getBlockReader(InetSocketAddress dnAddr,
|
||||
DatanodeInfo chosenNode,
|
||||
String file,
|
||||
ExtendedBlock block,
|
||||
Token<BlockTokenIdentifier> blockToken,
|
||||
@ -746,6 +747,12 @@ protected BlockReader getBlockReader(InetSocketAddress dnAddr,
|
||||
boolean verifyChecksum,
|
||||
String clientName)
|
||||
throws IOException {
|
||||
|
||||
if (dfsClient.shouldTryShortCircuitRead(dnAddr)) {
|
||||
return DFSClient.getLocalBlockReader(dfsClient.conf, src, block,
|
||||
blockToken, chosenNode, dfsClient.hdfsTimeout, startOffset);
|
||||
}
|
||||
|
||||
IOException err = null;
|
||||
boolean fromCache = true;
|
||||
|
||||
|
@ -45,6 +45,7 @@
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNode;
|
||||
import org.apache.hadoop.ipc.RPC;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.apache.hadoop.net.NodeBase;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
@ -723,6 +724,14 @@ public static boolean isFederationEnabled(Configuration conf) {
|
||||
return collection != null && collection.size() != 0;
|
||||
}
|
||||
|
||||
/** Create {@link ClientDatanodeProtocol} proxy using kerberos ticket */
|
||||
static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
|
||||
DatanodeID datanodeid, Configuration conf, int socketTimeout)
|
||||
throws IOException {
|
||||
return new org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeProtocolTranslatorR23(
|
||||
datanodeid, conf, socketTimeout);
|
||||
}
|
||||
|
||||
/** Create a {@link ClientDatanodeProtocol} proxy */
|
||||
public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
|
||||
InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
|
||||
|
@ -24,7 +24,6 @@
|
||||
import java.io.DataInputStream;
|
||||
import java.io.DataOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.Socket;
|
||||
import java.nio.ByteBuffer;
|
||||
@ -37,12 +36,9 @@
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
|
@ -85,7 +85,7 @@ public class RemoteBlockReader2 implements BlockReader {
|
||||
|
||||
Socket dnSock; //for now just sending the status code (e.g. checksumOk) after the read.
|
||||
private ReadableByteChannel in;
|
||||
private DataChecksum checksum;
|
||||
protected DataChecksum checksum;
|
||||
|
||||
private PacketHeader curHeader;
|
||||
private ByteBuffer curPacketBuf = null;
|
||||
@ -96,25 +96,25 @@ public class RemoteBlockReader2 implements BlockReader {
|
||||
private long lastSeqNo = -1;
|
||||
|
||||
/** offset in block where reader wants to actually read */
|
||||
private long startOffset;
|
||||
private final String filename;
|
||||
protected long startOffset;
|
||||
protected final String filename;
|
||||
|
||||
private static DirectBufferPool bufferPool =
|
||||
protected static DirectBufferPool bufferPool =
|
||||
new DirectBufferPool();
|
||||
private ByteBuffer headerBuf = ByteBuffer.allocate(
|
||||
PacketHeader.PKT_HEADER_LEN);
|
||||
|
||||
private int bytesPerChecksum;
|
||||
private int checksumSize;
|
||||
protected int bytesPerChecksum;
|
||||
protected int checksumSize;
|
||||
|
||||
/**
|
||||
* The total number of bytes we need to transfer from the DN.
|
||||
* This is the amount that the user has requested plus some padding
|
||||
* at the beginning so that the read can begin on a chunk boundary.
|
||||
*/
|
||||
private long bytesNeededToFinish;
|
||||
protected long bytesNeededToFinish;
|
||||
|
||||
private final boolean verifyChecksum;
|
||||
protected final boolean verifyChecksum;
|
||||
|
||||
private boolean sentStatusCode = false;
|
||||
|
||||
@ -271,7 +271,7 @@ private void readTrailingEmptyPacket() throws IOException {
|
||||
}
|
||||
}
|
||||
|
||||
private RemoteBlockReader2(String file, String bpid, long blockId,
|
||||
protected RemoteBlockReader2(String file, String bpid, long blockId,
|
||||
ReadableByteChannel in, DataChecksum checksum, boolean verifyChecksum,
|
||||
long startOffset, long firstChunkOffset, long bytesToRead, Socket dnSock) {
|
||||
// Path is used only for printing block and file information in debug
|
||||
|
@ -0,0 +1,97 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.protocol;
|
||||
|
||||
import java.io.DataInput;
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.io.Text;
|
||||
import org.apache.hadoop.io.Writable;
|
||||
import org.apache.hadoop.io.WritableFactories;
|
||||
import org.apache.hadoop.io.WritableFactory;
|
||||
|
||||
/**
|
||||
* A block and the full path information to the block data file and
|
||||
* the metadata file stored on the local file system.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Evolving
|
||||
public class BlockLocalPathInfo implements Writable {
|
||||
static final WritableFactory FACTORY = new WritableFactory() {
|
||||
public Writable newInstance() { return new BlockLocalPathInfo(); }
|
||||
};
|
||||
static { // register a ctor
|
||||
WritableFactories.setFactory(BlockLocalPathInfo.class, FACTORY);
|
||||
}
|
||||
|
||||
private ExtendedBlock block;
|
||||
private String localBlockPath = ""; // local file storing the data
|
||||
private String localMetaPath = ""; // local file storing the checksum
|
||||
|
||||
public BlockLocalPathInfo() {}
|
||||
|
||||
/**
|
||||
* Constructs BlockLocalPathInfo.
|
||||
* @param b The block corresponding to this block path info.
|
||||
* @param file Block data file.
|
||||
* @param metafile Metadata file for the block.
|
||||
*/
|
||||
public BlockLocalPathInfo(ExtendedBlock b, String file, String metafile) {
|
||||
block = b;
|
||||
localBlockPath = file;
|
||||
localMetaPath = metafile;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the Block data file.
|
||||
* @return Block data file.
|
||||
*/
|
||||
public String getBlockPath() {return localBlockPath;}
|
||||
|
||||
/**
|
||||
* Get the Block metadata file.
|
||||
* @return Block metadata file.
|
||||
*/
|
||||
public String getMetaPath() {return localMetaPath;}
|
||||
|
||||
@Override
|
||||
public void write(DataOutput out) throws IOException {
|
||||
block.write(out);
|
||||
Text.writeString(out, localBlockPath);
|
||||
Text.writeString(out, localMetaPath);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFields(DataInput in) throws IOException {
|
||||
block = new ExtendedBlock();
|
||||
block.readFields(in);
|
||||
localBlockPath = Text.readString(in);
|
||||
localMetaPath = Text.readString(in);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get number of bytes in the block.
|
||||
* @return Number of bytes in the block.
|
||||
*/
|
||||
public long getNumBytes() {
|
||||
return block.getNumBytes();
|
||||
}
|
||||
}
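Since BlockLocalPathInfo is a plain Writable, it can be round-tripped with Hadoop's buffer helpers. A hedged sketch; the ExtendedBlock constructor arguments and file paths are assumptions, not taken from this commit:

    import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;

    public class BlockLocalPathInfoRoundTrip {
      public static void main(String[] args) throws Exception {
        ExtendedBlock blk = new ExtendedBlock("bp-1", 1L, 0L, 0L); // assumed ctor
        BlockLocalPathInfo info = new BlockLocalPathInfo(blk,
            "/data/current/blk_1", "/data/current/blk_1_0.meta");

        DataOutputBuffer out = new DataOutputBuffer();
        info.write(out);                          // writes block + both path strings

        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        BlockLocalPathInfo copy = new BlockLocalPathInfo();
        copy.readFields(in);                      // restores block and paths
        System.out.println(copy.getBlockPath());  // /data/current/blk_1
      }
    }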
|
@ -19,14 +19,14 @@
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector;
|
||||
import org.apache.hadoop.ipc.VersionedProtocol;
|
||||
import org.apache.hadoop.security.KerberosInfo;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
import org.apache.hadoop.security.token.TokenInfo;
|
||||
|
||||
/** An client-datanode protocol for block recovery
|
||||
@ -85,4 +85,29 @@ public interface ClientDatanodeProtocol extends VersionedProtocol {
|
||||
* @throws IOException
|
||||
*/
|
||||
void deleteBlockPool(String bpid, boolean force) throws IOException;
|
||||
|
||||
/**
|
||||
* Retrieves the path names of the block file and metadata file stored on the
|
||||
* local file system.
|
||||
*
|
||||
* In order for this method to work, one of the following should be satisfied:
|
||||
* <ul>
|
||||
* <li>
|
||||
* The client user must be configured at the datanode to be able to use this
|
||||
* method.</li>
|
||||
* <li>
|
||||
* When security is enabled, kerberos authentication must be used to connect
|
||||
* to the datanode.</li>
|
||||
* </ul>
|
||||
*
|
||||
* @param block
|
||||
* the specified block on the local datanode
|
||||
* @param token
|
||||
* the block access token.
|
||||
* @return the BlockLocalPathInfo of a block
|
||||
* @throws IOException
|
||||
* on error
|
||||
*/
|
||||
BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
|
||||
Token<BlockTokenIdentifier> token) throws IOException;
|
||||
}
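A hedged usage sketch (not from this commit) of the RPC declared above, assuming the caller has already obtained a ClientDatanodeProtocol proxy to the datanode holding the replica and a valid block access token; proxy and token acquisition are elided.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;

class LocalReadSketch {
  /** Resolve the on-disk paths of a replica so a local client can read it directly. */
  static void printLocalPaths(ClientDatanodeProtocol datanode, ExtendedBlock block,
      Token<BlockTokenIdentifier> token) throws IOException {
    BlockLocalPathInfo info = datanode.getBlockLocalPathInfo(block, token);
    System.out.println("data file: " + info.getBlockPath());
    System.out.println("meta file: " + info.getMetaPath());
  }
}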
|
||||
|
@ -852,9 +852,9 @@ public void setTimes(String src, long mtime, long atime)
|
||||
|
||||
/**
|
||||
* Create symlink to a file or directory.
|
||||
* @param target The pathname of the destination that the
|
||||
* @param target The path of the destination that the
|
||||
* link points to.
|
||||
* @param link The pathname of the link being created.
|
||||
* @param link The path of the link being created.
|
||||
* @param dirPerm permissions to use when creating parent directories
|
||||
* @param createParent - if true then missing parent dirs are created
|
||||
* if false then parent must exist
|
||||
@ -875,14 +875,16 @@ public void createSymlink(String target, String link, FsPermission dirPerm,
|
||||
IOException;
|
||||
|
||||
/**
|
||||
* Resolve the first symbolic link on the specified path.
|
||||
* @param path The pathname that needs to be resolved
|
||||
*
|
||||
* @return The pathname after resolving the first symbolic link if any.
|
||||
* Return the target of the given symlink. If there is an intermediate
|
||||
* symlink in the path (i.e. a symlink leading up to the final path component)
|
||||
* then the given path is returned with this symlink resolved.
|
||||
*
|
||||
* @param path The path with a link that needs resolution.
|
||||
* @return The path after resolving the first symbolic link in the path.
|
||||
* @throws AccessControlException permission denied
|
||||
* @throws FileNotFoundException If <code>path</code> does not exist
|
||||
* @throws IOException If an I/O error occurred
|
||||
* @throws IOException If the given path does not refer to a symlink
|
||||
* or an I/O error occurred
|
||||
*/
|
||||
@Idempotent
|
||||
public String getLinkTarget(String path) throws AccessControlException,
|
||||
|
@ -32,10 +32,10 @@
|
||||
@InterfaceStability.Evolving
|
||||
public final class UnresolvedPathException extends UnresolvedLinkException {
|
||||
private static final long serialVersionUID = 1L;
|
||||
private String originalPath; // The original path containing the link
|
||||
private String linkTarget; // The target of the link
|
||||
private String remainingPath; // The path part following the link
|
||||
|
||||
private String path; // The path containing the link
|
||||
private String preceding; // The path part preceding the link
|
||||
private String remainder; // The path part following the link
|
||||
private String linkTarget; // The link's target
|
||||
|
||||
/**
|
||||
* Used by RemoteException to instantiate an UnresolvedPathException.
|
||||
@ -44,22 +44,30 @@ public UnresolvedPathException(String msg) {
|
||||
super(msg);
|
||||
}
|
||||
|
||||
public UnresolvedPathException(String originalPath, String remainingPath,
|
||||
String linkTarget) {
|
||||
this.originalPath = originalPath;
|
||||
this.remainingPath = remainingPath;
|
||||
public UnresolvedPathException(String path, String preceding,
|
||||
String remainder, String linkTarget) {
|
||||
this.path = path;
|
||||
this.preceding = preceding;
|
||||
this.remainder = remainder;
|
||||
this.linkTarget = linkTarget;
|
||||
}
|
||||
|
||||
public Path getUnresolvedPath() throws IOException {
|
||||
return new Path(originalPath);
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a path with the link resolved with the target.
|
||||
*/
|
||||
public Path getResolvedPath() throws IOException {
|
||||
if (remainingPath == null || "".equals(remainingPath)) {
|
||||
return new Path(linkTarget);
|
||||
// If the target is absolute we can throw out the preceding part and
|
||||
// just append the remainder to the target, otherwise append each
|
||||
// piece to resolve the link in path.
|
||||
boolean noRemainder = (remainder == null || "".equals(remainder));
|
||||
Path target = new Path(linkTarget);
|
||||
if (target.isUriPathAbsolute()) {
|
||||
return noRemainder ? target : new Path(target, remainder);
|
||||
} else {
|
||||
return noRemainder
|
||||
? new Path(preceding, target)
|
||||
: new Path(new Path(preceding, linkTarget), remainder);
|
||||
}
|
||||
return new Path(linkTarget, remainingPath);
|
||||
}
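An illustrative walk-through of the resolution rule implemented in the new getResolvedPath above, using made-up paths: an absolute link target discards the preceding component, while a relative target is resolved against it; in both cases the remainder is appended.

import org.apache.hadoop.fs.Path;

public class SymlinkResolutionExample {
  public static void main(String[] args) {
    // Link /a/link -> /x/y with remainder "b/c": absolute target, so the
    // preceding component "/a" is dropped.
    System.out.println(new Path(new Path("/x/y"), "b/c"));        // /x/y/b/c

    // Link /a/link -> sub/z with remainder "b/c": relative target, so it is
    // resolved against the preceding component "/a".
    System.out.println(new Path(new Path("/a", "sub/z"), "b/c")); // /a/sub/z/b/c
  }
}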
|
||||
|
||||
@Override
|
||||
@ -68,7 +76,7 @@ public String getMessage() {
|
||||
if (msg != null) {
|
||||
return msg;
|
||||
}
|
||||
String myMsg = "Unresolved path " + originalPath;
|
||||
String myMsg = "Unresolved path " + path;
|
||||
try {
|
||||
return getResolvedPath().toString();
|
||||
} catch (IOException e) {
|
||||
|
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -21,9 +21,13 @@
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||
import org.apache.hadoop.ipc.ProtocolSignature;
|
||||
import org.apache.hadoop.ipc.RPC;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
|
||||
/**
|
||||
* This class is used on the server side.
|
||||
@ -116,4 +120,10 @@ public void refreshNamenodes() throws IOException {
|
||||
public void deleteBlockPool(String bpid, boolean force) throws IOException {
|
||||
server.deleteBlockPool(bpid, force);
|
||||
}
|
||||
|
||||
@Override
|
||||
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
|
||||
Token<BlockTokenIdentifier> token) throws IOException {
|
||||
return server.getBlockLocalPathInfo(block, token);
|
||||
}
|
||||
}
|
@ -26,14 +26,17 @@
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
|
||||
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||
import org.apache.hadoop.ipc.ProtocolSignature;
|
||||
import org.apache.hadoop.ipc.RPC;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
|
||||
|
||||
/**
|
||||
@ -63,6 +66,23 @@ public ClientDatanodeProtocolTranslatorR23(InetSocketAddress addr,
|
||||
rpcProxy = createClientDatanodeProtocolProxy(addr, ticket, conf, factory);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor.
|
||||
* @param datanodeid Datanode to connect to.
|
||||
* @param conf Configuration.
|
||||
* @param socketTimeout Socket timeout to use.
|
||||
* @throws IOException
|
||||
*/
|
||||
public ClientDatanodeProtocolTranslatorR23(DatanodeID datanodeid,
|
||||
Configuration conf, int socketTimeout) throws IOException {
|
||||
InetSocketAddress addr = NetUtils.createSocketAddr(datanodeid.getHost()
|
||||
+ ":" + datanodeid.getIpcPort());
|
||||
rpcProxy = RPC.getProxy(ClientDatanodeWireProtocol.class,
|
||||
ClientDatanodeWireProtocol.versionID, addr,
|
||||
UserGroupInformation.getCurrentUser(), conf,
|
||||
NetUtils.getDefaultSocketFactory(conf), socketTimeout);
|
||||
}
|
||||
|
||||
static ClientDatanodeWireProtocol createClientDatanodeProtocolProxy(
|
||||
DatanodeID datanodeid, Configuration conf, int socketTimeout,
|
||||
LocatedBlock locatedBlock)
|
||||
@ -134,4 +154,9 @@ public void deleteBlockPool(String bpid, boolean force) throws IOException {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
|
||||
Token<BlockTokenIdentifier> token) throws IOException {
|
||||
return rpcProxy.getBlockLocalPathInfo(block, token);
|
||||
}
|
||||
}
|
||||
|
@ -24,11 +24,15 @@
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector;
|
||||
import org.apache.hadoop.ipc.ProtocolInfo;
|
||||
import org.apache.hadoop.ipc.VersionedProtocol;
|
||||
import org.apache.hadoop.security.KerberosInfo;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
import org.apache.hadoop.security.token.TokenInfo;
|
||||
|
||||
/**
|
||||
@ -77,6 +81,13 @@ public interface ClientDatanodeWireProtocol extends VersionedProtocol {
|
||||
*/
|
||||
void deleteBlockPool(String bpid, boolean force) throws IOException;
|
||||
|
||||
/**
|
||||
* The specification of this method matches that of
|
||||
* {@link org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol#getBlockLocalPathInfo(ExtendedBlock, Token)}
|
||||
*/
|
||||
BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
|
||||
Token<BlockTokenIdentifier> token) throws IOException;
|
||||
|
||||
/**
|
||||
* This method is defined to get the protocol signature using
|
||||
* the R23 protocol - hence we have added the suffix of 2 to the method name
|
||||
|
@ -39,7 +39,6 @@
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
|
@ -29,8 +29,6 @@
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
import java.util.TreeSet;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.HadoopIllegalArgumentException;
|
||||
|
@ -20,14 +20,10 @@
|
||||
import java.io.DataInput;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Queue;
|
||||
import java.util.Set;
|
||||
import java.util.TreeSet;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.hdfs.DeprecatedUTF8;
|
||||
|
@ -20,8 +20,6 @@
|
||||
import java.io.PrintWriter;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
|
@ -20,9 +20,6 @@
|
||||
import java.util.ArrayList;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.NavigableSet;
|
||||
import java.util.TreeSet;
|
||||
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNode;
|
||||
|
@ -0,0 +1,782 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.server.datanode;
|
||||
|
||||
import static org.apache.hadoop.hdfs.server.common.Util.now;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.SocketTimeoutException;
|
||||
import java.net.URI;
|
||||
import java.util.Collection;
|
||||
import java.util.LinkedList;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hdfs.DFSUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
|
||||
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
|
||||
import org.apache.hadoop.hdfs.server.common.Storage;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
|
||||
import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
|
||||
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
|
||||
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
|
||||
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
|
||||
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
|
||||
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
|
||||
import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
|
||||
import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
|
||||
import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
|
||||
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
|
||||
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
|
||||
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
|
||||
import org.apache.hadoop.ipc.RPC;
|
||||
import org.apache.hadoop.ipc.RemoteException;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.base.Preconditions;
|
||||
|
||||
/**
|
||||
* A thread per namenode to perform:
|
||||
* <ul>
|
||||
* <li> Pre-registration handshake with namenode</li>
|
||||
* <li> Registration with namenode</li>
|
||||
* <li> Send periodic heartbeats to the namenode</li>
|
||||
* <li> Handle commands received from the namenode</li>
|
||||
* </ul>
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
class BPOfferService implements Runnable {
|
||||
static final Log LOG = DataNode.LOG;
|
||||
|
||||
final InetSocketAddress nnAddr;
|
||||
|
||||
/**
|
||||
* Information about the namespace that this service
|
||||
* is registering with. This is assigned after
|
||||
* the first phase of the handshake.
|
||||
*/
|
||||
NamespaceInfo bpNSInfo;
|
||||
|
||||
/**
|
||||
* The registration information for this block pool.
|
||||
* This is assigned after the second phase of the
|
||||
* handshake.
|
||||
*/
|
||||
DatanodeRegistration bpRegistration;
|
||||
|
||||
long lastBlockReport = 0;
|
||||
long lastDeletedReport = 0;
|
||||
|
||||
boolean resetBlockReportTime = true;
|
||||
|
||||
Thread bpThread;
|
||||
DatanodeProtocol bpNamenode;
|
||||
private long lastHeartbeat = 0;
|
||||
private volatile boolean initialized = false;
|
||||
private final LinkedList<ReceivedDeletedBlockInfo> receivedAndDeletedBlockList
|
||||
= new LinkedList<ReceivedDeletedBlockInfo>();
|
||||
private volatile int pendingReceivedRequests = 0;
|
||||
private volatile boolean shouldServiceRun = true;
|
||||
UpgradeManagerDatanode upgradeManager = null;
|
||||
private final DataNode dn;
|
||||
private final DNConf dnConf;
|
||||
|
||||
BPOfferService(InetSocketAddress nnAddr, DataNode dn) {
|
||||
this.dn = dn;
|
||||
this.nnAddr = nnAddr;
|
||||
this.dnConf = dn.getDnConf();
|
||||
}
|
||||
|
||||
/**
|
||||
* returns true if BP thread has completed initialization of storage
|
||||
* and has registered with the corresponding namenode
|
||||
* @return true if initialized
|
||||
*/
|
||||
public boolean isInitialized() {
|
||||
return initialized;
|
||||
}
|
||||
|
||||
public boolean isAlive() {
|
||||
return shouldServiceRun && bpThread.isAlive();
|
||||
}
|
||||
|
||||
public String getBlockPoolId() {
|
||||
if (bpNSInfo != null) {
|
||||
return bpNSInfo.getBlockPoolID();
|
||||
} else {
|
||||
LOG.warn("Block pool ID needed, but service not yet registered with NN",
|
||||
new Exception("trace"));
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
public NamespaceInfo getNamespaceInfo() {
|
||||
return bpNSInfo;
|
||||
}
|
||||
|
||||
public String toString() {
|
||||
if (bpNSInfo == null) {
|
||||
// If we haven't yet connected to our NN, we don't yet know our
|
||||
// own block pool ID.
|
||||
// If _none_ of the block pools have connected yet, we don't even
|
||||
// know the storage ID of this DN.
|
||||
String storageId = dn.getStorageId();
|
||||
if (storageId == null || "".equals(storageId)) {
|
||||
storageId = "unknown";
|
||||
}
|
||||
return "Block pool <registering> (storage id " + storageId +
|
||||
") connecting to " + nnAddr;
|
||||
} else {
|
||||
return "Block pool " + getBlockPoolId() +
|
||||
" (storage id " + dn.getStorageId() +
|
||||
") registered with " + nnAddr;
|
||||
}
|
||||
}
|
||||
|
||||
InetSocketAddress getNNSocketAddress() {
|
||||
return nnAddr;
|
||||
}
|
||||
|
||||
/**
|
||||
* Used to inject a spy NN in the unit tests.
|
||||
*/
|
||||
@VisibleForTesting
|
||||
void setNameNode(DatanodeProtocol dnProtocol) {
|
||||
bpNamenode = dnProtocol;
|
||||
}
|
||||
|
||||
/**
|
||||
* Perform the first part of the handshake with the NameNode.
|
||||
* This calls <code>versionRequest</code> to determine the NN's
|
||||
* namespace and version info. It automatically retries until
|
||||
* the NN responds or the DN is shutting down.
|
||||
*
|
||||
* @return the NamespaceInfo
|
||||
* @throws IncorrectVersionException if the remote NN does not match
|
||||
* this DN's version
|
||||
*/
|
||||
NamespaceInfo retrieveNamespaceInfo() throws IncorrectVersionException {
|
||||
NamespaceInfo nsInfo = null;
|
||||
while (shouldRun()) {
|
||||
try {
|
||||
nsInfo = bpNamenode.versionRequest();
|
||||
LOG.debug(this + " received versionRequest response: " + nsInfo);
|
||||
break;
|
||||
} catch(SocketTimeoutException e) { // namenode is busy
|
||||
LOG.warn("Problem connecting to server: " + nnAddr);
|
||||
} catch(IOException e ) { // namenode is not available
|
||||
LOG.warn("Problem connecting to server: " + nnAddr);
|
||||
}
|
||||
|
||||
// try again in a second
|
||||
sleepAndLogInterrupts(5000, "requesting version info from NN");
|
||||
}
|
||||
|
||||
if (nsInfo != null) {
|
||||
checkNNVersion(nsInfo);
|
||||
}
|
||||
return nsInfo;
|
||||
}
|
||||
|
||||
private void checkNNVersion(NamespaceInfo nsInfo)
|
||||
throws IncorrectVersionException {
|
||||
// build and layout versions should match
|
||||
String nsBuildVer = nsInfo.getBuildVersion();
|
||||
String stBuildVer = Storage.getBuildVersion();
|
||||
if (!nsBuildVer.equals(stBuildVer)) {
|
||||
LOG.warn("Data-node and name-node Build versions must be the same. " +
|
||||
"Namenode build version: " + nsBuildVer + "Datanode " +
|
||||
"build version: " + stBuildVer);
|
||||
throw new IncorrectVersionException(nsBuildVer, "namenode", stBuildVer);
|
||||
}
|
||||
|
||||
if (HdfsConstants.LAYOUT_VERSION != nsInfo.getLayoutVersion()) {
|
||||
LOG.warn("Data-node and name-node layout versions must be the same." +
|
||||
" Expected: "+ HdfsConstants.LAYOUT_VERSION +
|
||||
" actual "+ bpNSInfo.getLayoutVersion());
|
||||
throw new IncorrectVersionException(
|
||||
bpNSInfo.getLayoutVersion(), "namenode");
|
||||
}
|
||||
}
|
||||
|
||||
private void connectToNNAndHandshake() throws IOException {
|
||||
// get NN proxy
|
||||
bpNamenode = (DatanodeProtocol)RPC.waitForProxy(DatanodeProtocol.class,
|
||||
DatanodeProtocol.versionID, nnAddr, dn.getConf());
|
||||
|
||||
// First phase of the handshake with NN - get the namespace
|
||||
// info.
|
||||
bpNSInfo = retrieveNamespaceInfo();
|
||||
|
||||
// Now that we know the namespace ID, etc, we can pass this to the DN.
|
||||
// The DN can now initialize its local storage if we are the
|
||||
// first BP to handshake, etc.
|
||||
dn.initBlockPool(this);
|
||||
|
||||
// Second phase of the handshake with the NN.
|
||||
register();
|
||||
}
|
||||
|
||||
/**
|
||||
* This method arranges for the data node to send the block report at
|
||||
* the next heartbeat.
|
||||
*/
|
||||
void scheduleBlockReport(long delay) {
|
||||
if (delay > 0) { // send BR after random delay
|
||||
lastBlockReport = System.currentTimeMillis()
|
||||
- ( dnConf.blockReportInterval - DFSUtil.getRandom().nextInt((int)(delay)));
|
||||
} else { // send at next heartbeat
|
||||
lastBlockReport = lastHeartbeat - dnConf.blockReportInterval;
|
||||
}
|
||||
resetBlockReportTime = true; // reset future BRs for randomness
|
||||
}
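A small worked sketch (assumed numbers, not part of the patch) of the back-dating above: with a one hour report interval and a two minute requested delay, lastBlockReport is set so the first report becomes due within roughly that delay window.

public class InitialBlockReportJitter {
  public static void main(String[] args) {
    long blockReportInterval = 60L * 60 * 1000;  // assumed 1 hour interval
    long delay = 2L * 60 * 1000;                 // assumed 2 minute initial delay window
    long jitter = new java.util.Random().nextInt((int) delay);

    // Back-date the last report so that (now - lastBlockReport) is already
    // (interval - jitter); the report trigger fires after roughly jitter ms.
    long lastBlockReport = System.currentTimeMillis() - (blockReportInterval - jitter);
    System.out.println("first report due in ~" + jitter + " ms; lastBlockReport=" + lastBlockReport);
  }
}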
|
||||
|
||||
void reportBadBlocks(ExtendedBlock block) {
|
||||
DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) };
|
||||
LocatedBlock[] blocks = { new LocatedBlock(block, dnArr) };
|
||||
|
||||
try {
|
||||
bpNamenode.reportBadBlocks(blocks);
|
||||
} catch (IOException e){
|
||||
/* One common reason is that NameNode could be in safe mode.
|
||||
* Should we keep on retrying in that case?
|
||||
*/
|
||||
LOG.warn("Failed to report bad block " + block + " to namenode : "
|
||||
+ " Exception", e);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Report received blocks and delete hints to the Namenode
|
||||
*
|
||||
* @throws IOException
|
||||
*/
|
||||
private void reportReceivedDeletedBlocks() throws IOException {
|
||||
|
||||
// check if there are newly received blocks
|
||||
ReceivedDeletedBlockInfo[] receivedAndDeletedBlockArray = null;
|
||||
int currentReceivedRequestsCounter;
|
||||
synchronized (receivedAndDeletedBlockList) {
|
||||
currentReceivedRequestsCounter = pendingReceivedRequests;
|
||||
int numBlocks = receivedAndDeletedBlockList.size();
|
||||
if (numBlocks > 0) {
|
||||
//
|
||||
// Send newly-received and deleted blockids to namenode
|
||||
//
|
||||
receivedAndDeletedBlockArray = receivedAndDeletedBlockList
|
||||
.toArray(new ReceivedDeletedBlockInfo[numBlocks]);
|
||||
}
|
||||
}
|
||||
if (receivedAndDeletedBlockArray != null) {
|
||||
bpNamenode.blockReceivedAndDeleted(bpRegistration, getBlockPoolId(),
|
||||
receivedAndDeletedBlockArray);
|
||||
synchronized (receivedAndDeletedBlockList) {
|
||||
for (int i = 0; i < receivedAndDeletedBlockArray.length; i++) {
|
||||
receivedAndDeletedBlockList.remove(receivedAndDeletedBlockArray[i]);
|
||||
}
|
||||
pendingReceivedRequests -= currentReceivedRequestsCounter;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Informing the name node could take a long long time! Should we wait
|
||||
* till namenode is informed before responding with success to the
|
||||
* client? For now we don't.
|
||||
*/
|
||||
void notifyNamenodeReceivedBlock(ExtendedBlock block, String delHint) {
|
||||
if (block == null || delHint == null) {
|
||||
throw new IllegalArgumentException(block == null ? "Block is null"
|
||||
: "delHint is null");
|
||||
}
|
||||
|
||||
if (!block.getBlockPoolId().equals(getBlockPoolId())) {
|
||||
LOG.warn("BlockPool mismatch " + block.getBlockPoolId() + " vs. "
|
||||
+ getBlockPoolId());
|
||||
return;
|
||||
}
|
||||
|
||||
synchronized (receivedAndDeletedBlockList) {
|
||||
receivedAndDeletedBlockList.add(new ReceivedDeletedBlockInfo(block
|
||||
.getLocalBlock(), delHint));
|
||||
pendingReceivedRequests++;
|
||||
receivedAndDeletedBlockList.notifyAll();
|
||||
}
|
||||
}
|
||||
|
||||
void notifyNamenodeDeletedBlock(ExtendedBlock block) {
|
||||
if (block == null) {
|
||||
throw new IllegalArgumentException("Block is null");
|
||||
}
|
||||
|
||||
if (!block.getBlockPoolId().equals(getBlockPoolId())) {
|
||||
LOG.warn("BlockPool mismatch " + block.getBlockPoolId() + " vs. "
|
||||
+ getBlockPoolId());
|
||||
return;
|
||||
}
|
||||
|
||||
synchronized (receivedAndDeletedBlockList) {
|
||||
receivedAndDeletedBlockList.add(new ReceivedDeletedBlockInfo(block
|
||||
.getLocalBlock(), ReceivedDeletedBlockInfo.TODELETE_HINT));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Report the list blocks to the Namenode
|
||||
* @throws IOException
|
||||
*/
|
||||
DatanodeCommand blockReport() throws IOException {
|
||||
// send block report if timer has expired.
|
||||
DatanodeCommand cmd = null;
|
||||
long startTime = now();
|
||||
if (startTime - lastBlockReport > dnConf.blockReportInterval) {
|
||||
|
||||
// Create block report
|
||||
long brCreateStartTime = now();
|
||||
BlockListAsLongs bReport = dn.data.getBlockReport(getBlockPoolId());
|
||||
|
||||
// Send block report
|
||||
long brSendStartTime = now();
|
||||
cmd = bpNamenode.blockReport(bpRegistration, getBlockPoolId(), bReport
|
||||
.getBlockListAsLongs());
|
||||
|
||||
// Log the block report processing stats from Datanode perspective
|
||||
long brSendCost = now() - brSendStartTime;
|
||||
long brCreateCost = brSendStartTime - brCreateStartTime;
|
||||
dn.metrics.addBlockReport(brSendCost);
|
||||
LOG.info("BlockReport of " + bReport.getNumberOfBlocks()
|
||||
+ " blocks took " + brCreateCost + " msec to generate and "
|
||||
+ brSendCost + " msecs for RPC and NN processing");
|
||||
|
||||
// If we have sent the first block report, then wait a random
|
||||
// time before we start the periodic block reports.
|
||||
if (resetBlockReportTime) {
|
||||
lastBlockReport = startTime - DFSUtil.getRandom().nextInt((int)(dnConf.blockReportInterval));
|
||||
resetBlockReportTime = false;
|
||||
} else {
|
||||
/* say the last block report was at 8:20:14. The current report
|
||||
* should have started around 9:20:14 (default 1 hour interval).
|
||||
* If current time is :
|
||||
* 1) normal like 9:20:18, next report should be at 10:20:14
|
||||
* 2) unexpected like 11:35:43, next report should be at 12:20:14
|
||||
*/
|
||||
lastBlockReport += (now() - lastBlockReport) /
|
||||
dnConf.blockReportInterval * dnConf.blockReportInterval;
|
||||
}
|
||||
LOG.info("sent block report, processed command:" + cmd);
|
||||
}
|
||||
return cmd;
|
||||
}
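A worked example (with assumed times) of the catch-up arithmetic in the else branch above: advancing lastBlockReport by whole intervals keeps later reports aligned to the original schedule even after a long stall.

public class BlockReportCatchUp {
  public static void main(String[] args) {
    long interval = 60L * 60 * 1000;                        // assumed 1 hour interval
    long last = 0L;                                         // stands in for 8:20:14
    long now = 3 * interval + 15 * 60 * 1000 + 29 * 1000;   // 3h15m29s later, i.e. 11:35:43

    // Advance by the number of whole intervals that elapsed (3 here), so the
    // next report is due exactly one interval after 11:20:14, i.e. 12:20:14.
    last += (now - last) / interval * interval;
    System.out.println("intervals consumed: " + last / interval);  // prints 3
  }
}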
|
||||
|
||||
|
||||
DatanodeCommand [] sendHeartBeat() throws IOException {
|
||||
return bpNamenode.sendHeartbeat(bpRegistration,
|
||||
dn.data.getCapacity(),
|
||||
dn.data.getDfsUsed(),
|
||||
dn.data.getRemaining(),
|
||||
dn.data.getBlockPoolUsed(getBlockPoolId()),
|
||||
dn.xmitsInProgress.get(),
|
||||
dn.getXceiverCount(), dn.data.getNumFailedVolumes());
|
||||
}
|
||||
|
||||
//This must be called only by blockPoolManager
|
||||
void start() {
|
||||
if ((bpThread != null) && (bpThread.isAlive())) {
|
||||
//Thread is started already
|
||||
return;
|
||||
}
|
||||
bpThread = new Thread(this, formatThreadName());
|
||||
bpThread.setDaemon(true); // needed for JUnit testing
|
||||
bpThread.start();
|
||||
}
|
||||
|
||||
private String formatThreadName() {
|
||||
Collection<URI> dataDirs = DataNode.getStorageDirs(dn.getConf());
|
||||
return "DataNode: [" +
|
||||
StringUtils.uriToString(dataDirs.toArray(new URI[0])) + "] " +
|
||||
" heartbeating to " + nnAddr;
|
||||
}
|
||||
|
||||
//This must be called only by blockPoolManager.
|
||||
void stop() {
|
||||
shouldServiceRun = false;
|
||||
if (bpThread != null) {
|
||||
bpThread.interrupt();
|
||||
}
|
||||
}
|
||||
|
||||
//This must be called only by blockPoolManager
|
||||
void join() {
|
||||
try {
|
||||
if (bpThread != null) {
|
||||
bpThread.join();
|
||||
}
|
||||
} catch (InterruptedException ie) { }
|
||||
}
|
||||
|
||||
//Cleanup method to be called by current thread before exiting.
|
||||
private synchronized void cleanUp() {
|
||||
|
||||
if(upgradeManager != null)
|
||||
upgradeManager.shutdownUpgrade();
|
||||
shouldServiceRun = false;
|
||||
RPC.stopProxy(bpNamenode);
|
||||
dn.shutdownBlockPool(this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Main loop for each BP thread. Run until shutdown,
|
||||
* forever calling remote NameNode functions.
|
||||
*/
|
||||
private void offerService() throws Exception {
|
||||
LOG.info("For namenode " + nnAddr + " using DELETEREPORT_INTERVAL of "
|
||||
+ dnConf.deleteReportInterval + " msec " + " BLOCKREPORT_INTERVAL of "
|
||||
+ dnConf.blockReportInterval + "msec" + " Initial delay: "
|
||||
+ dnConf.initialBlockReportDelay + "msec" + "; heartBeatInterval="
|
||||
+ dnConf.heartBeatInterval);
|
||||
|
||||
//
|
||||
// Now loop for a long time....
|
||||
//
|
||||
while (shouldRun()) {
|
||||
try {
|
||||
long startTime = now();
|
||||
|
||||
//
|
||||
// Every so often, send heartbeat or block-report
|
||||
//
|
||||
if (startTime - lastHeartbeat > dnConf.heartBeatInterval) {
|
||||
//
|
||||
// All heartbeat messages include following info:
|
||||
// -- Datanode name
|
||||
// -- data transfer port
|
||||
// -- Total capacity
|
||||
// -- Bytes remaining
|
||||
//
|
||||
lastHeartbeat = startTime;
|
||||
if (!dn.areHeartbeatsDisabledForTests()) {
|
||||
DatanodeCommand[] cmds = sendHeartBeat();
|
||||
dn.metrics.addHeartbeat(now() - startTime);
|
||||
|
||||
long startProcessCommands = now();
|
||||
if (!processCommand(cmds))
|
||||
continue;
|
||||
long endProcessCommands = now();
|
||||
if (endProcessCommands - startProcessCommands > 2000) {
|
||||
LOG.info("Took " + (endProcessCommands - startProcessCommands) +
|
||||
"ms to process " + cmds.length + " commands from NN");
|
||||
}
|
||||
}
|
||||
}
|
||||
if (pendingReceivedRequests > 0
|
||||
|| (startTime - lastDeletedReport > dnConf.deleteReportInterval)) {
|
||||
reportReceivedDeletedBlocks();
|
||||
lastDeletedReport = startTime;
|
||||
}
|
||||
|
||||
DatanodeCommand cmd = blockReport();
|
||||
processCommand(cmd);
|
||||
|
||||
// Now safe to start scanning the block pool
|
||||
if (dn.blockScanner != null) {
|
||||
dn.blockScanner.addBlockPool(this.getBlockPoolId());
|
||||
}
|
||||
|
||||
//
|
||||
// There is no work to do; sleep until heartbeat timer elapses,
|
||||
// or work arrives, and then iterate again.
|
||||
//
|
||||
long waitTime = dnConf.heartBeatInterval -
|
||||
(System.currentTimeMillis() - lastHeartbeat);
|
||||
synchronized(receivedAndDeletedBlockList) {
|
||||
if (waitTime > 0 && pendingReceivedRequests == 0) {
|
||||
try {
|
||||
receivedAndDeletedBlockList.wait(waitTime);
|
||||
} catch (InterruptedException ie) {
|
||||
LOG.warn("BPOfferService for " + this + " interrupted");
|
||||
}
|
||||
}
|
||||
} // synchronized
|
||||
} catch(RemoteException re) {
|
||||
String reClass = re.getClassName();
|
||||
if (UnregisteredNodeException.class.getName().equals(reClass) ||
|
||||
DisallowedDatanodeException.class.getName().equals(reClass) ||
|
||||
IncorrectVersionException.class.getName().equals(reClass)) {
|
||||
LOG.warn(this + " is shutting down", re);
|
||||
shouldServiceRun = false;
|
||||
return;
|
||||
}
|
||||
LOG.warn("RemoteException in offerService", re);
|
||||
try {
|
||||
long sleepTime = Math.min(1000, dnConf.heartBeatInterval);
|
||||
Thread.sleep(sleepTime);
|
||||
} catch (InterruptedException ie) {
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
} catch (IOException e) {
|
||||
LOG.warn("IOException in offerService", e);
|
||||
}
|
||||
} // while (shouldRun())
|
||||
} // offerService
|
||||
|
||||
/**
|
||||
* Register one bp with the corresponding NameNode
|
||||
* <p>
|
||||
* The bpDatanode needs to register with the namenode on startup in order
* 1) to report which storage it is serving now and
* 2) to receive a registrationID issued by the namenode to recognize
*    registered datanodes.
|
||||
*
|
||||
* @see FSNamesystem#registerDatanode(DatanodeRegistration)
|
||||
* @throws IOException
|
||||
*/
|
||||
void register() throws IOException {
|
||||
Preconditions.checkState(bpNSInfo != null,
|
||||
"register() should be called after handshake()");
|
||||
|
||||
// The handshake() phase loaded the block pool storage
|
||||
// off disk - so update the bpRegistration object from that info
|
||||
bpRegistration = dn.createBPRegistration(bpNSInfo);
|
||||
|
||||
LOG.info(this + " beginning handshake with NN");
|
||||
|
||||
while (shouldRun()) {
|
||||
try {
|
||||
// Use returned registration from namenode with updated machine name.
|
||||
bpRegistration = bpNamenode.registerDatanode(bpRegistration);
|
||||
break;
|
||||
} catch(SocketTimeoutException e) { // namenode is busy
|
||||
LOG.info("Problem connecting to server: " + nnAddr);
|
||||
sleepAndLogInterrupts(1000, "connecting to server");
|
||||
}
|
||||
}
|
||||
|
||||
LOG.info("Block pool " + this + " successfully registered with NN");
|
||||
dn.bpRegistrationSucceeded(bpRegistration, getBlockPoolId());
|
||||
|
||||
// random short delay - helps scatter the BR from all DNs
|
||||
scheduleBlockReport(dnConf.initialBlockReportDelay);
|
||||
}
|
||||
|
||||
|
||||
private void sleepAndLogInterrupts(int millis,
|
||||
String stateString) {
|
||||
try {
|
||||
Thread.sleep(millis);
|
||||
} catch (InterruptedException ie) {
|
||||
LOG.info("BPOfferService " + this +
|
||||
" interrupted while " + stateString);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* No matter what kind of exception we get, keep retrying offerService().
|
||||
* That's the loop that connects to the NameNode and provides basic DataNode
|
||||
* functionality.
|
||||
*
|
||||
* Only stop when "shouldRun" or "shouldServiceRun" is turned off, which can
|
||||
* happen either at shutdown or due to refreshNamenodes.
|
||||
*/
|
||||
@Override
|
||||
public void run() {
|
||||
LOG.info(this + " starting to offer service");
|
||||
|
||||
try {
|
||||
// init stuff
|
||||
try {
|
||||
// setup storage
|
||||
connectToNNAndHandshake();
|
||||
} catch (IOException ioe) {
|
||||
// Initial handshake, storage recovery or registration failed
|
||||
// End BPOfferService thread
|
||||
LOG.fatal("Initialization failed for block pool " + this, ioe);
|
||||
return;
|
||||
}
|
||||
|
||||
initialized = true; // bp is initialized;
|
||||
|
||||
while (shouldRun()) {
|
||||
try {
|
||||
startDistributedUpgradeIfNeeded();
|
||||
offerService();
|
||||
} catch (Exception ex) {
|
||||
LOG.error("Exception in BPOfferService for " + this, ex);
|
||||
sleepAndLogInterrupts(5000, "offering service");
|
||||
}
|
||||
}
|
||||
} catch (Throwable ex) {
|
||||
LOG.warn("Unexpected exception in block pool " + this, ex);
|
||||
} finally {
|
||||
LOG.warn("Ending block pool service for: " + this);
|
||||
cleanUp();
|
||||
}
|
||||
}
|
||||
|
||||
private boolean shouldRun() {
|
||||
return shouldServiceRun && dn.shouldRun();
|
||||
}
|
||||
|
||||
/**
|
||||
* Process an array of datanode commands
|
||||
*
|
||||
* @param cmds an array of datanode commands
|
||||
* @return true if further processing may be required or false otherwise.
|
||||
*/
|
||||
private boolean processCommand(DatanodeCommand[] cmds) {
|
||||
if (cmds != null) {
|
||||
for (DatanodeCommand cmd : cmds) {
|
||||
try {
|
||||
if (processCommand(cmd) == false) {
|
||||
return false;
|
||||
}
|
||||
} catch (IOException ioe) {
|
||||
LOG.warn("Error processing datanode Command", ioe);
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @param cmd
|
||||
* @return true if further processing may be required or false otherwise.
|
||||
* @throws IOException
|
||||
*/
|
||||
private boolean processCommand(DatanodeCommand cmd) throws IOException {
|
||||
if (cmd == null)
|
||||
return true;
|
||||
final BlockCommand bcmd =
|
||||
cmd instanceof BlockCommand? (BlockCommand)cmd: null;
|
||||
|
||||
switch(cmd.getAction()) {
|
||||
case DatanodeProtocol.DNA_TRANSFER:
|
||||
// Send a copy of a block to another datanode
|
||||
dn.transferBlocks(bcmd.getBlockPoolId(), bcmd.getBlocks(), bcmd.getTargets());
|
||||
dn.metrics.incrBlocksReplicated(bcmd.getBlocks().length);
|
||||
break;
|
||||
case DatanodeProtocol.DNA_INVALIDATE:
|
||||
//
|
||||
// Some local block(s) are obsolete and can be
|
||||
// safely garbage-collected.
|
||||
//
|
||||
Block toDelete[] = bcmd.getBlocks();
|
||||
try {
|
||||
if (dn.blockScanner != null) {
|
||||
dn.blockScanner.deleteBlocks(bcmd.getBlockPoolId(), toDelete);
|
||||
}
|
||||
// using global fsdataset
|
||||
dn.data.invalidate(bcmd.getBlockPoolId(), toDelete);
|
||||
} catch(IOException e) {
|
||||
dn.checkDiskError();
|
||||
throw e;
|
||||
}
|
||||
dn.metrics.incrBlocksRemoved(toDelete.length);
|
||||
break;
|
||||
case DatanodeProtocol.DNA_SHUTDOWN:
|
||||
// shut down the data node
|
||||
shouldServiceRun = false;
|
||||
return false;
|
||||
case DatanodeProtocol.DNA_REGISTER:
|
||||
// namenode requested a registration - at start or if NN lost contact
|
||||
LOG.info("DatanodeCommand action: DNA_REGISTER");
|
||||
if (shouldRun()) {
|
||||
// re-retrieve namespace info to make sure that, if the NN
|
||||
// was restarted, we still match its version (HDFS-2120)
|
||||
retrieveNamespaceInfo();
|
||||
// and re-register
|
||||
register();
|
||||
}
|
||||
break;
|
||||
case DatanodeProtocol.DNA_FINALIZE:
|
||||
String bp = ((FinalizeCommand) cmd).getBlockPoolId();
|
||||
assert getBlockPoolId().equals(bp) :
|
||||
"BP " + getBlockPoolId() + " received DNA_FINALIZE " +
|
||||
"for other block pool " + bp;
|
||||
|
||||
dn.finalizeUpgradeForPool(bp);
|
||||
break;
|
||||
case UpgradeCommand.UC_ACTION_START_UPGRADE:
|
||||
// start distributed upgrade here
|
||||
processDistributedUpgradeCommand((UpgradeCommand)cmd);
|
||||
break;
|
||||
case DatanodeProtocol.DNA_RECOVERBLOCK:
|
||||
dn.recoverBlocks(((BlockRecoveryCommand)cmd).getRecoveringBlocks());
|
||||
break;
|
||||
case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
|
||||
LOG.info("DatanodeCommand action: DNA_ACCESSKEYUPDATE");
|
||||
if (dn.isBlockTokenEnabled) {
|
||||
dn.blockPoolTokenSecretManager.setKeys(getBlockPoolId(),
|
||||
((KeyUpdateCommand) cmd).getExportedKeys());
|
||||
}
|
||||
break;
|
||||
case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
|
||||
LOG.info("DatanodeCommand action: DNA_BALANCERBANDWIDTHUPDATE");
|
||||
long bandwidth =
|
||||
((BalancerBandwidthCommand) cmd).getBalancerBandwidthValue();
|
||||
if (bandwidth > 0) {
|
||||
DataXceiverServer dxcs =
|
||||
(DataXceiverServer) dn.dataXceiverServer.getRunnable();
|
||||
dxcs.balanceThrottler.setBandwidth(bandwidth);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
private void processDistributedUpgradeCommand(UpgradeCommand comm)
|
||||
throws IOException {
|
||||
UpgradeManagerDatanode upgradeManager = getUpgradeManager();
|
||||
upgradeManager.processUpgradeCommand(comm);
|
||||
}
|
||||
|
||||
synchronized UpgradeManagerDatanode getUpgradeManager() {
|
||||
if(upgradeManager == null)
|
||||
upgradeManager =
|
||||
new UpgradeManagerDatanode(dn, getBlockPoolId());
|
||||
|
||||
return upgradeManager;
|
||||
}
|
||||
|
||||
/**
|
||||
* Start distributed upgrade if it should be initiated by the data-node.
|
||||
*/
|
||||
private void startDistributedUpgradeIfNeeded() throws IOException {
|
||||
UpgradeManagerDatanode um = getUpgradeManager();
|
||||
|
||||
if(!um.getUpgradeState())
|
||||
return;
|
||||
um.setUpgradeState(false, um.getUpgradeVersion());
|
||||
um.startUpgrade();
|
||||
return;
|
||||
}
|
||||
|
||||
}
|
@ -26,9 +26,11 @@
|
||||
import java.io.IOException;
|
||||
import java.io.RandomAccessFile;
|
||||
|
||||
import org.apache.commons.httpclient.methods.GetMethod;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.util.DataChecksum;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
||||
|
||||
|
||||
/**
|
||||
@ -36,7 +38,9 @@
|
||||
* This is not related to the block-related functionality in the Namenode.
* The biggest part of the data block metadata is the CRC for the block.
|
||||
*/
|
||||
class BlockMetadataHeader {
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Evolving
|
||||
public class BlockMetadataHeader {
|
||||
|
||||
static final short METADATA_VERSION = FSDataset.METADATA_VERSION;
|
||||
|
||||
@ -53,11 +57,13 @@ class BlockMetadataHeader {
|
||||
this.version = version;
|
||||
}
|
||||
|
||||
short getVersion() {
|
||||
/** Get the version */
|
||||
public short getVersion() {
|
||||
return version;
|
||||
}
|
||||
|
||||
DataChecksum getChecksum() {
|
||||
/** Get the checksum */
|
||||
public DataChecksum getChecksum() {
|
||||
return checksum;
|
||||
}
|
||||
|
||||
@ -68,7 +74,7 @@ DataChecksum getChecksum() {
|
||||
* @return Metadata Header
|
||||
* @throws IOException
|
||||
*/
|
||||
static BlockMetadataHeader readHeader(DataInputStream in) throws IOException {
|
||||
public static BlockMetadataHeader readHeader(DataInputStream in) throws IOException {
|
||||
return readHeader(in.readShort(), in);
|
||||
}
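A hedged sketch of what the widened visibility above enables: code outside this package can now parse the header of a replica's ".meta" file directly. The file path is a placeholder, and only the readHeader/getVersion/getChecksum methods shown in this diff are used.

import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;

class ReadMetaHeader {
  public static void main(String[] args) throws IOException {
    // Placeholder path to a replica's metadata file on the local datanode.
    DataInputStream in = new DataInputStream(new BufferedInputStream(
        new FileInputStream("/data/dn/current/blk_1_1001.meta")));
    try {
      BlockMetadataHeader header = BlockMetadataHeader.readHeader(in);
      System.out.println("version=" + header.getVersion()
          + " checksum=" + header.getChecksum());
    } finally {
      in.close();
    }
  }
}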
|
||||
|
||||
|
@ -34,7 +34,6 @@
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
import java.util.TreeSet;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
@ -252,8 +251,9 @@ private synchronized long getNewBlockScanTime() {
|
||||
*/
|
||||
long period = Math.min(scanPeriod,
|
||||
Math.max(blockMap.size(),1) * 600 * 1000L);
|
||||
int periodInt = Math.abs((int)period);
|
||||
return System.currentTimeMillis() - scanPeriod +
|
||||
DFSUtil.getRandom().nextInt((int)period);
|
||||
DFSUtil.getRandom().nextInt(periodInt);
|
||||
}
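A short illustration (assumed values) of why the Math.abs(...) introduced above matters: if the configured scan period is large enough, the long value no longer fits in an int, a plain cast goes negative, and Random.nextInt() would throw IllegalArgumentException.

public class ScanPeriodCastExample {
  public static void main(String[] args) {
    long period = 30L * 24 * 3600 * 1000;        // e.g. a 30 day scan period
    System.out.println((int) period);            // negative: the cast overflowed
    System.out.println(Math.abs((int) period));  // non-negative, safe for nextInt()
  }
}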
|
||||
|
||||
/** Adds block to list of blocks */
|
||||
|
@ -50,7 +50,6 @@
|
||||
import org.apache.hadoop.io.nativeio.NativeIO;
|
||||
import org.apache.hadoop.util.Daemon;
|
||||
import org.apache.hadoop.util.DataChecksum;
|
||||
import org.apache.hadoop.util.PureJavaCrc32;
|
||||
|
||||
/** A class that receives a block and writes to its own disk, meanwhile
|
||||
* may copies it to another site. If a throttler is provided,
|
||||
|
@ -31,7 +31,6 @@
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.hdfs.server.datanode.DataNode.BPOfferService;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
|
||||
|
File diff suppressed because it is too large
@ -48,8 +48,6 @@
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.Receiver;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProtoOrBuilder;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto;
|
||||
@ -128,7 +126,7 @@ private void updateCurrentThreadName(String status) {
|
||||
public void run() {
|
||||
int opsProcessed = 0;
|
||||
Op op = null;
|
||||
dataXceiverServer.childSockets.put(s, s);
|
||||
dataXceiverServer.childSockets.add(s);
|
||||
try {
|
||||
int stdTimeout = s.getSoTimeout();
|
||||
|
||||
@ -165,14 +163,6 @@ public void run() {
|
||||
s.setSoTimeout(stdTimeout);
|
||||
}
|
||||
|
||||
// Make sure the xceiver count is not exceeded
|
||||
int curXceiverCount = datanode.getXceiverCount();
|
||||
if (curXceiverCount > dataXceiverServer.maxXceiverCount) {
|
||||
throw new IOException("xceiverCount " + curXceiverCount
|
||||
+ " exceeds the limit of concurrent xcievers "
|
||||
+ dataXceiverServer.maxXceiverCount);
|
||||
}
|
||||
|
||||
opStartTime = now();
|
||||
processOp(op);
|
||||
++opsProcessed;
|
||||
|
@ -23,9 +23,9 @@
|
||||
import java.net.SocketTimeoutException;
|
||||
import java.nio.channels.AsynchronousCloseException;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
@ -48,8 +48,8 @@ class DataXceiverServer implements Runnable {
|
||||
ServerSocket ss;
|
||||
DataNode datanode;
|
||||
// Record all sockets opened for data transfer
|
||||
Map<Socket, Socket> childSockets = Collections.synchronizedMap(
|
||||
new HashMap<Socket, Socket>());
|
||||
Set<Socket> childSockets = Collections.synchronizedSet(
|
||||
new HashSet<Socket>());
|
||||
|
||||
/**
|
||||
* Maximal number of concurrent xceivers per node.
|
||||
@ -135,6 +135,15 @@ public void run() {
|
||||
try {
|
||||
s = ss.accept();
|
||||
s.setTcpNoDelay(true);
|
||||
|
||||
// Make sure the xceiver count is not exceeded
|
||||
int curXceiverCount = datanode.getXceiverCount();
|
||||
if (curXceiverCount > maxXceiverCount) {
|
||||
throw new IOException("Xceiver count " + curXceiverCount
|
||||
+ " exceeds the limit of concurrent xcievers: "
|
||||
+ maxXceiverCount);
|
||||
}
|
||||
|
||||
new Daemon(datanode.threadGroup, new DataXceiver(s, datanode, this))
|
||||
.start();
|
||||
} catch (SocketTimeoutException ignored) {
|
||||
@ -184,7 +193,7 @@ void kill() {
|
||||
|
||||
// close all the sockets that were accepted earlier
|
||||
synchronized (childSockets) {
|
||||
for (Iterator<Socket> it = childSockets.values().iterator();
|
||||
for (Iterator<Socket> it = childSockets.iterator();
|
||||
it.hasNext();) {
|
||||
Socket thissock = it.next();
|
||||
try {
|
||||
|
@ -52,6 +52,7 @@
|
||||
import org.apache.hadoop.hdfs.DFSUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
|
||||
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
|
||||
@ -459,7 +460,7 @@ private long validateIntegrity(File blockFile, long genStamp) {
|
||||
long metaFileLen = metaFile.length();
|
||||
int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
|
||||
if (!blockFile.exists() || blockFileLen == 0 ||
|
||||
!metaFile.exists() || metaFileLen < (long)crcHeaderLen) {
|
||||
!metaFile.exists() || metaFileLen < crcHeaderLen) {
|
||||
return 0;
|
||||
}
|
||||
checksumIn = new DataInputStream(
|
||||
@ -578,7 +579,7 @@ long getBlockPoolUsed(String bpid) throws IOException {
|
||||
* reserved capacity.
|
||||
* @return the unreserved number of bytes left in this filesystem. May be zero.
|
||||
*/
|
||||
long getCapacity() throws IOException {
|
||||
long getCapacity() {
|
||||
long remaining = usage.getCapacity() - reserved;
|
||||
return remaining > 0 ? remaining : 0;
|
||||
}
|
||||
@ -818,7 +819,7 @@ private long getBlockPoolUsed(String bpid) throws IOException {
|
||||
return dfsUsed;
|
||||
}
|
||||
|
||||
private long getCapacity() throws IOException {
|
||||
private long getCapacity() {
|
||||
long capacity = 0L;
|
||||
for (FSVolume vol : volumes) {
|
||||
capacity += vol.getCapacity();
|
||||
@ -1667,7 +1668,7 @@ private void bumpReplicaGS(ReplicaInfo replicaInfo,
|
||||
}
|
||||
if (!oldmeta.renameTo(newmeta)) {
|
||||
replicaInfo.setGenerationStamp(oldGS); // restore old GS
|
||||
throw new IOException("Block " + (Block)replicaInfo + " reopen failed. " +
|
||||
throw new IOException("Block " + replicaInfo + " reopen failed. " +
|
||||
" Unable to move meta file " + oldmeta +
|
||||
" to " + newmeta);
|
||||
}
|
||||
@ -2018,7 +2019,7 @@ private boolean isValid(final ExtendedBlock b, final ReplicaState state) {
|
||||
/**
|
||||
* Find the file corresponding to the block and return it if it exists.
|
||||
*/
|
||||
File validateBlockFile(String bpid, Block b) throws IOException {
|
||||
File validateBlockFile(String bpid, Block b) {
|
||||
//Should we check for metadata file too?
|
||||
File f = getFile(bpid, b);
|
||||
|
||||
@ -2327,7 +2328,7 @@ public void checkAndUpdate(String bpid, long blockId, File diskFile,
|
||||
if (datanode.blockScanner != null) {
|
||||
datanode.blockScanner.addBlock(new ExtendedBlock(bpid, diskBlockInfo));
|
||||
}
|
||||
DataNode.LOG.warn("Added missing block to memory " + (Block)diskBlockInfo);
|
||||
DataNode.LOG.warn("Added missing block to memory " + diskBlockInfo);
|
||||
return;
|
||||
}
|
||||
/*
|
||||
@ -2600,7 +2601,7 @@ public synchronized void shutdownBlockPool(String bpid) {
|
||||
* get list of all bpids
|
||||
* @return list of bpids
|
||||
*/
|
||||
public String [] getBPIdlist() throws IOException {
|
||||
public String [] getBPIdlist() {
|
||||
return volumeMap.getBlockPoolList();
|
||||
}
|
||||
|
||||
@ -2658,4 +2659,14 @@ public synchronized void deleteBlockPool(String bpid, boolean force)
|
||||
volume.deleteBPDirectories(bpid, force);
|
||||
}
|
||||
}
|
||||
|
||||
@Override // FSDatasetInterface
|
||||
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
|
||||
throws IOException {
|
||||
File datafile = getBlockFile(block);
|
||||
File metafile = getMetaFile(datafile, block.getGenerationStamp());
|
||||
BlockLocalPathInfo info = new BlockLocalPathInfo(block,
|
||||
datafile.getAbsolutePath(), metafile.getAbsolutePath());
|
||||
return info;
|
||||
}
|
||||
}
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.File;
|
||||
import java.io.FilterInputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
@ -31,6 +32,7 @@
|
||||
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
|
||||
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.util.DataChecksum;
|
||||
@ -402,4 +404,9 @@ public ReplicaInfo updateReplicaUnderRecovery(
|
||||
* @throws IOException
|
||||
*/
|
||||
public void deleteBlockPool(String bpid, boolean force) throws IOException;
|
||||
|
||||
/**
|
||||
* Get {@link BlockLocalPathInfo} for the given block.
|
||||
**/
|
||||
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b) throws IOException;
|
||||
}
|
||||
|
@ -17,7 +17,6 @@
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.server.datanode;
|
||||
|
||||
import java.io.DataInputStream;
|
||||
import java.io.File;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
|
@ -60,6 +60,7 @@ public class DataNodeMetrics {
|
||||
@Metric MutableCounterLong readsFromRemoteClient;
|
||||
@Metric MutableCounterLong writesFromLocalClient;
|
||||
@Metric MutableCounterLong writesFromRemoteClient;
|
||||
@Metric MutableCounterLong blocksGetLocalPathInfo;
|
||||
|
||||
@Metric MutableCounterLong volumeFailures;
|
||||
|
||||
@ -165,4 +166,9 @@ public void incrReadsFromClient(boolean local) {
|
||||
public void incrVolumeFailures() {
|
||||
volumeFailures.incr();
|
||||
}
|
||||
|
||||
/** Increment for getBlockLocalPathInfo calls */
|
||||
public void incrBlocksGetLocalPathInfo() {
|
||||
blocksGetLocalPathInfo.incr();
|
||||
}
|
||||
}
|
||||
|
@ -17,14 +17,10 @@
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.server.namenode;
|
||||
|
||||
import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.zip.Checksum;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@ -19,7 +19,6 @@

import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;

/**
@ -36,7 +36,6 @@
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.util.Daemon;

@ -23,8 +23,6 @@
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@ -26,10 +26,7 @@
import java.io.DataInputStream;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

import com.google.common.annotations.VisibleForTesting;

/**
@ -37,7 +37,6 @@
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.*;
import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
@ -19,7 +19,6 @@

import static org.apache.hadoop.hdfs.server.common.Util.now;

import java.io.File;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
@ -34,7 +33,6 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream.LogHeaderCorruptException;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCloseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CancelDelegationTokenOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ClearNSQuotaOp;
@ -57,8 +55,6 @@
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
import org.apache.hadoop.hdfs.util.Holder;
import org.apache.hadoop.io.IOUtils;

import com.google.common.base.Joiner;

public class FSEditLogLoader {
@ -19,8 +19,6 @@

import java.io.File;
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -23,28 +23,16 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;

import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;

class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
public static final Log LOG = LogFactory.getLog(
@ -305,7 +305,20 @@ private static final void logAuditEvent(UserGroupInformation ugi,
* @throws IOException if loading fails
*/
public static FSNamesystem loadFromDisk(Configuration conf) throws IOException {
FSImage fsImage = new FSImage(conf);
Collection<URI> namespaceDirs = FSNamesystem.getNamespaceDirs(conf);
Collection<URI> namespaceEditsDirs =
FSNamesystem.getNamespaceEditsDirs(conf);

if (namespaceDirs.size() == 1) {
LOG.warn("Only one " + DFS_NAMENODE_NAME_DIR_KEY
+ " directory configured , beware data loss!");
}
if (namespaceEditsDirs.size() == 1) {
LOG.warn("Only one " + DFS_NAMENODE_EDITS_DIR_KEY
+ " directory configured , beware data loss!");
}

FSImage fsImage = new FSImage(conf, namespaceDirs, namespaceEditsDirs);
FSNamesystem namesystem = new FSNamesystem(conf, fsImage);

long loadStart = now();
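For context, a minimal sketch of a configuration that avoids the new single-directory warning above; the directory paths and the HdfsConfiguration usage are illustrative assumptions, not part of this change:

    // Illustrative only: give the NameNode redundant image and edits directories
    // so loadFromDisk() does not log the "beware data loss" warning.
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, "/data/1/dfs/name,/data/2/dfs/name");
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "/data/1/dfs/edits,/data/2/dfs/edits");
    FSNamesystem namesystem = FSNamesystem.loadFromDisk(conf);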
@ -2060,10 +2073,12 @@ void removePathAndBlocks(String src, List<Block> blocks) {
}
}

/** Get the file info for a specific file.
/**
* Get the file info for a specific file.
*
* @param src The string representation of the path to the file
* @param resolveLink whether to throw UnresolvedLinkException
* if src refers to a symlinks
* if src refers to a symlink
*
* @throws AccessControlException if access is denied
* @throws UnresolvedLinkException if a symlink is encountered.
@ -2271,6 +2286,7 @@ boolean internalReleaseLease(Lease lease, String src,
// If the penultimate block is not COMPLETE, then it must be COMMITTED.
if(nrCompleteBlocks < nrBlocks - 2 ||
nrCompleteBlocks == nrBlocks - 2 &&
curBlock != null &&
curBlock.getBlockUCState() != BlockUCState.COMMITTED) {
final String message = "DIR* NameSystem.internalReleaseLease: "
+ "attempt to release a create lock on "
@ -2459,7 +2475,7 @@ void commitBlockSynchronization(ExtendedBlock lastblock,
newtargets[i]);
}
}
if (closeFile) {
if ((closeFile) && (descriptors != null)) {
// the file is getting closed. Insert block locations into blockManager.
// Otherwise fsck will report these blocks as MISSING, especially if the
// blocksReceived from Datanodes take a long time to arrive.
@ -3283,6 +3299,7 @@ public String toString() {
/**
* Checks consistency of the class state.
* This is costly and currently called only in assert.
* @throws IOException
*/
private boolean isConsistent() {
if (blockTotal == -1 && blockSafe == -1) {
@ -19,8 +19,6 @@

import java.io.IOException;
import java.io.PrintWriter;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;

import javax.net.SocketFactory;
@ -18,7 +18,6 @@
package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;
import java.net.URISyntaxException;
import java.net.URL;
import java.security.PrivilegedExceptionAction;

@ -372,14 +372,16 @@ static String[] getPathNames(String path) {

/**
* Given some components, create a path name.
* @param components
* @param components The path components
* @param start index
* @param end index
* @return concatenated path
*/
static String constructPath(byte[][] components, int start) {
static String constructPath(byte[][] components, int start, int end) {
StringBuilder buf = new StringBuilder();
for (int i = start; i < components.length; i++) {
for (int i = start; i < end; i++) {
buf.append(DFSUtil.bytes2String(components[i]));
if (i < components.length - 1) {
if (i < end - 1) {
buf.append(Path.SEPARATOR);
}
}
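A short illustration of the new constructPath(byte[][], int, int) contract; the sample components are hypothetical and only assume that DFSUtil.bytes2String decodes UTF-8 bytes:

    // Illustrative only: with components {"a", "b", "c", "d"}
    byte[][] components = { "a".getBytes(), "b".getBytes(), "c".getBytes(), "d".getBytes() };
    //   constructPath(components, 0, 4) -> "a/b/c/d"
    //   constructPath(components, 1, 3) -> "b/c"
    // The old start-only form always ran to components.length, so a bounded
    // sub-range such as "b/c" could not be expressed.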
@ -23,8 +23,6 @@
import java.util.List;

import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
@ -191,18 +189,19 @@ assert compareBytes(this.name, components[0]) == 0 :
existing[index] = curNode;
}
if (curNode.isLink() && (!lastComp || (lastComp && resolveLink))) {
final String path = constructPath(components, 0, components.length);
final String preceding = constructPath(components, 0, count);
final String remainder =
constructPath(components, count + 1, components.length);
final String link = DFSUtil.bytes2String(components[count]);
final String target = ((INodeSymlink)curNode).getLinkValue();
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("UnresolvedPathException " +
" count: " + count +
" componenent: " + DFSUtil.bytes2String(components[count]) +
" full path: " + constructPath(components, 0) +
" remaining path: " + constructPath(components, count+1) +
" symlink: " + ((INodeSymlink)curNode).getLinkValue());
" path: " + path + " preceding: " + preceding +
" count: " + count + " link: " + link + " target: " + target +
" remainder: " + remainder);
}
final String linkTarget = ((INodeSymlink)curNode).getLinkValue();
throw new UnresolvedPathException(constructPath(components, 0),
constructPath(components, count+1),
linkTarget);
throw new UnresolvedPathException(path, preceding, remainder, target);
}
if (lastComp || !curNode.isDirectory()) {
break;
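A worked example (path names assumed for illustration) of how the strings passed to the new UnresolvedPathException constructor decompose a path whose component at index count is a symlink:

    // Illustrative only: for /dir1/link/dir2/file where "link" -> /target and count == 2
    //   path      = "/dir1/link/dir2/file"  constructPath(components, 0, components.length)
    //   preceding = "/dir1"                 constructPath(components, 0, count)
    //   remainder = "dir2/file"             constructPath(components, count + 1, components.length)
    //   link      = "link", target = "/target"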
@ -17,6 +17,10 @@
*/
package org.apache.hadoop.hdfs.server.namenode;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
@ -145,7 +149,7 @@ private Map<String, String> getAuthFilterParams(Configuration conf)
}
};

boolean certSSL = conf.getBoolean("dfs.https.enable", false);
boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false);
boolean useKrb = UserGroupInformation.isSecurityEnabled();
if (certSSL || useKrb) {
boolean needClientAuth = conf.getBoolean(
@ -156,14 +160,14 @@ private Map<String, String> getAuthFilterParams(Configuration conf)
DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT));
Configuration sslConf = new HdfsConfiguration(false);
if (certSSL) {
sslConf.addResource(conf.get(
"dfs.https.server.keystore.resource", "ssl-server.xml"));
sslConf.addResource(conf.get(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
}
httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth,
useKrb);
// assume same ssl port for all datanodes
InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf
.get("dfs.datanode.https.address", infoHost + ":" + 50475));
.get(DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 50475));
httpServer.setAttribute("datanode.https.port", datanodeSslPort
.getPort());
}
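A hedged sketch of the configuration that drives the code above, using only keys referenced in this hunk; the keystore resource name and the datanode address are the defaults that appear elsewhere in this change:

    // Illustrative only: enable the secure listeners through the named keys.
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, true);
    conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY, "ssl-server.xml");
    conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "0.0.0.0:50475");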
@ -819,10 +819,6 @@ public void createSymlink(String target, String link, FsPermission dirPerms,
public String getLinkTarget(String path) throws IOException {
nn.checkOperation(OperationCategory.READ);
metrics.incrGetLinkTargetOps();
/* Resolves the first symlink in the given path, returning a
* new path consisting of the target of the symlink and any
* remaining path components from the original path.
*/
try {
HdfsFileStatus stat = namesystem.getFileInfo(path, false);
if (stat != null) {
@ -29,7 +29,6 @@
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.TreeSet;

import org.apache.commons.logging.Log;
@ -380,7 +380,13 @@ static void redirectToRandomDataNode(ServletContext context,
final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
final Configuration conf = (Configuration) context
.getAttribute(JspHelper.CURRENT_CONF);
final DatanodeID datanode = getRandomDatanode(nn);
// We can't redirect if there isn't a DN to redirect to.
// Lets instead show a proper error message.
if (nn.getNamesystem().getNumLiveDataNodes() < 1) {
throw new IOException("Can't browse the DFS since there are no " +
"live nodes available to redirect to.");
}
final DatanodeID datanode = getRandomDatanode(nn);;
UserGroupInformation ugi = JspHelper.getUGI(context, request, conf);
String tokenString = getDelegationToken(
nn.getRpcServer(), request, conf, ugi);
@ -29,7 +29,6 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

@ -20,8 +20,6 @@
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Comparator;

import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.io.Writable;

@ -20,17 +20,12 @@
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocolR23Compatible.BlockWritable;
import org.apache.hadoop.hdfs.protocolR23Compatible.DatanodeInfoWritable;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
@ -34,7 +34,6 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
@ -40,7 +40,6 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.HftpFileSystem;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@ -18,8 +18,6 @@
package org.apache.hadoop.hdfs.tools.offlineEditsViewer;

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

@ -22,8 +22,6 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

import org.apache.hadoop.hdfs.DeprecatedUTF8;

import static org.apache.hadoop.hdfs.tools.offlineEditsViewer.Tokenizer.ByteToken;
import static org.apache.hadoop.hdfs.tools.offlineEditsViewer.Tokenizer.ShortToken;
import static org.apache.hadoop.hdfs.tools.offlineEditsViewer.Tokenizer.IntToken;
@ -18,8 +18,6 @@
package org.apache.hadoop.hdfs.tools.offlineEditsViewer;

import java.io.IOException;
import java.util.regex.Pattern;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

@ -18,8 +18,6 @@
package org.apache.hadoop.hdfs.tools.offlineEditsViewer;

import java.io.IOException;
import java.util.regex.Pattern;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

@ -19,11 +19,6 @@

import java.lang.ref.WeakReference;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
@ -22,7 +22,6 @@
import java.util.Collection;
import java.util.ConcurrentModificationException;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.NoSuchElementException;

@ -122,11 +122,17 @@ creations/deletions), or "all".</description>
<property>
<name>dfs.datanode.https.address</name>
<value>0.0.0.0:50475</value>
<description>The datanode secure http server address and port.
If the port is 0 then the server will start on a free port.
</description>
</property>

<property>
<name>dfs.namenode.https-address</name>
<value>0.0.0.0:50470</value>
<description>The namenode secure http server address and port.
If the port is 0 then the server will start on a free port.
</description>
</property>

<property>
@ -193,6 +199,14 @@ creations/deletions), or "all".</description>
directories, for redundancy. </description>
</property>

<property>
<name>dfs.namenode.name.dir.restore</name>
<value>false</value>
<description>Set to true to enable NameNode to attempt recovering a
previously failed dfs.name.dir. When enabled, a recovery of any failed
directory is attempted during checkpoint.</description>
</property>

<property>
<name>dfs.namenode.fs-limits.max-component-length</name>
<value>0</value>
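The new properties can also be set programmatically; a small sketch (key strings taken verbatim from the entries above, values are examples only):

    // Illustrative only: secure namenode address plus restore of failed name dirs.
    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.namenode.https-address", "0.0.0.0:50470");
    conf.setBoolean("dfs.namenode.name.dir.restore", true); // retry a failed dfs.name.dir at checkpoint time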
@ -0,0 +1,78 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

// This file contains protocol buffers that are used throughout HDFS -- i.e.
// by the client, server, and data transfer protocols.

option java_package = "org.apache.hadoop.hdfs.protocol.proto";
option java_outer_classname = "InterDatanodeProtocolProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;

import "hdfs.proto";

/**
* Block with location information and new generation stamp
* to be used for recovery.
*/
message InitReplicaRecoveryRequestProto {
required RecoveringBlockProto block = 1;
}

/**
* Repica recovery information
*/
message InitReplicaRecoveryResponseProto {
required ReplicaState state = 1; // State fo the replica
required BlockProto block = 2; // block information
}

/**
* Update replica with new generation stamp and length
*/
message UpdateReplicaUnderRecoveryRequestProto {
required ExtendedBlockProto block = 1; // Block identifier
required uint64 recoveryId = 2; // New genstamp of the replica
required uint64 newLength = 3; // New length of the replica
}

/**
* Response returns updated block information
*/
message UpdateReplicaUnderRecoveryResponseProto {
required ExtendedBlockProto block = 1; // Updated block information
}

/**
* Protocol used between datanodes for block recovery.
*
* See the request and response for details of rpc call.
*/
service InterDatanodeProtocolService {
/**
* Initialize recovery of a replica
*/
rpc initReplicaRecovery(InitReplicaRecoveryRequestProto)
returns(InitReplicaRecoveryResponseProto);

/**
* Update a replica with new generation stamp and length
*/
rpc updateReplicaUnderRecovery(UpdateReplicaUnderRecoveryRequestProto)
returns(UpdateReplicaUnderRecoveryResponseProto);
}
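For reference, a minimal sketch of the Java side that protoc generates for the messages above; the outer class name follows from the java_outer_classname option, and the RecoveringBlockProto value is assumed to be built elsewhere:

    // Illustrative only: build an InitReplicaRecoveryRequestProto with the generated builder.
    InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto req =
        InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.newBuilder()
            .setBlock(recoveringBlock) // hypothetical RecoveringBlockProto instance
            .build();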
@ -0,0 +1,83 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

// This file contains protocol buffers that are used throughout HDFS -- i.e.
// by the client, server, and data transfer protocols.

option java_package = "org.apache.hadoop.hdfs.protocol.proto";
option java_outer_classname = "JournalProtocolProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;

import "hdfs.proto";

/**
* registration - the registration info of the active NameNode
* firstTxnId - the first txid in the rolled edit log
* numTxns - Number of transactions in editlog
* records - bytes containing serialized journal records
*/
message JournalRequestProto {
required NamenodeRegistrationProto registration = 1; // Registration info
required uint64 firstTxnId = 2; // Transaction ID
required uint32 numTxns = 3; // Transaction ID
required bytes records = 4; // Journal record
}

/**
* void response
*/
message JournalResponseProto {
}

/**
* registration - the registration info of the active NameNode
* txid - first txid in the new log
*/
message StartLogSegmentRequestProto {
required NamenodeRegistrationProto registration = 1; // Registration info
required uint64 txid = 2; // Transaction ID
}

/**
* void response
*/
message StartLogSegmentResponseProto {
}

/**
* Protocol used to journal edits to a remote node. Currently,
* this is used to publish edits from the NameNode to a BackupNode.
*
* See the request and response for details of rpc call.
*/
service JournalProtocolService {
/**
* Request sent by active namenode to backup node via
* EditLogBackupOutputStream to stream editlog records.
*/
rpc journal(JournalRequestProto) returns (JournalResponseProto);

/**
* Request sent by active namenode to backup node to notify
* that the NameNode has rolled its edit logs and is now writing a
* new log segment.
*/
rpc startLogSegment(StartLogSegmentRequestProto)
returns (StartLogSegmentResponseProto);
}
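A similar hedged sketch for filling a journal request; the registration, transaction ids and record bytes are assumed inputs from the caller:

    // Illustrative only: build a JournalRequestProto via the generated builder.
    JournalProtocolProtos.JournalRequestProto journalReq =
        JournalProtocolProtos.JournalRequestProto.newBuilder()
            .setRegistration(registration) // hypothetical NamenodeRegistrationProto
            .setFirstTxnId(firstTxId)
            .setNumTxns(numTxns)
            .setRecords(ByteString.copyFrom(records)) // com.google.protobuf.ByteString
            .build();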
hadoop-hdfs-project/hadoop-hdfs/src/proto/NamenodeProtocol.proto
@ -0,0 +1,225 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

// This file contains protocol buffers that are used throughout HDFS -- i.e.
// by the client, server, and data transfer protocols.

option java_package = "org.apache.hadoop.hdfs.protocol.proto";
option java_outer_classname = "NamenodeProtocolProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;

import "hdfs.proto";

/**
* Get list of blocks for a given datanode with the total length
* of adding up to given size
* datanode - Datanode ID to get list of block from
* size - size to which the block lengths must add up to
*/
message GetBlocksRequestProto {
required DatanodeIDProto datanode = 1; // Datanode ID
required uint64 size = 2; // Size in bytes
}


/**
* blocks - List of returned blocks
*/
message GetBlocksResponseProto {
required BlockWithLocationsProto blocks = 1; // List of blocks
}

/**
* void request
*/
message GetBlockKeysRequestProto {
}

/**
* keys - Information about block keys at the active namenode
*/
message GetBlockKeysResponseProto {
required ExportedBlockKeysProto keys = 1;
}

/**
* void request
*/
message GetTransactionIdRequestProto {
}

/**
* txId - Transaction ID of the most recently persisted edit log record
*/
message GetTransactionIdResponseProto {
required uint64 txId = 1; // Transaction ID
}

/**
* void request
*/
message RollEditLogRequestProto {
}

/**
* signature - A unique token to identify checkpoint transaction
*/
message RollEditLogResponseProto {
required CheckpointSignatureProto signature = 1;
}

/**
* registartion - Namenode reporting the error
* errorCode - error code indicating the error
* msg - Free text description of the error
*/
message ErrorReportRequestProto {
required NamenodeRegistrationProto registartion = 1; // Registartion info
required uint32 errorCode = 2; // Error code
required string msg = 3; // Error message
}

/**
* void response
*/
message ErrorReportResponseProto {
}

/**
* registration - Information of the namenode registering with primary namenode
*/
message RegisterRequestProto {
required NamenodeRegistrationProto registration = 1; // Registration info
}

/**
* registration - Updated registration information of the newly registered
* datanode.
*/
message RegisterResponseProto {
required NamenodeRegistrationProto registration = 1; // Registration info
}

/**
* Start checkpoint request
* registration - Namenode that is starting the checkpoint
*/
message StartCheckpointRequestProto {
required NamenodeRegistrationProto registration = 1; // Registration info
}

/**
* command - Command returned by the active namenode to be
* be handled by the caller.
*/
message StartCheckpointResponseProto {
required NamenodeCommandProto command = 1;
}

/**
* End or finalize the previously started checkpoint
* registration - Namenode that is ending the checkpoint
* signature - unique token to identify checkpoint transaction,
* that was received when checkpoint was started.
*/
message EndCheckpointRequestProto {
required NamenodeRegistrationProto registration = 1; // Registration info
required CheckpointSignatureProto signature = 2;
}

/**
* void response
*/
message EndCheckpointResponseProto {
}

/**
* sinceTxId - return the editlog information for transactions >= sinceTxId
*/
message GetEditLogManifestRequestProto {
required uint64 sinceTxId = 1; // Transaction ID
}

/**
* manifest - Enumeration of editlogs from namenode for
* logs >= sinceTxId in the request
*/
message GetEditLogManifestResponseProto {
required RemoteEditLogManifestProto manifest = 1;
}

/**
* Protocol used by the sub-ordinate namenode to send requests
* the active/primary namenode.
*
* See the request and response for details of rpc call.
*/
service NamenodeProtocolService {
/**
* Get list of blocks for a given datanode with length
* of blocks adding up to given size.
*/
rpc getBlocks(GetBlocksRequestProto) returns(GetBlocksResponseProto);

/**
* Get the current block keys
*/
rpc getBlockKeys(GetBlockKeysRequestProto) returns(GetBlockKeysResponseProto);

/**
* Get the transaction ID of the most recently persisted editlog record
*/
rpc getTransationId(GetTransactionIdRequestProto)
returns(GetTransactionIdResponseProto);

/**
* Close the current editlog and open a new one for checkpointing purposes
*/
rpc rollEditLog(RollEditLogRequestProto) returns(RollEditLogResponseProto);

/**
* Report from a sub-ordinate namenode of an error to the active namenode.
* Active namenode may decide to unregister the reporting namenode
* depending on the error.
*/
rpc errorReport(ErrorReportRequestProto) returns(ErrorReportResponseProto);

/**
* Request to register a sub-ordinate namenode
*/
rpc register(RegisterRequestProto) returns(RegisterResponseProto);

/**
* Request to start a checkpoint.
*/
rpc startCheckpoint(StartCheckpointRequestProto)
returns(StartCheckpointResponseProto);

/**
* End of finalize the previously started checkpoint
*/
rpc endCheckpoint(EndCheckpointRequestProto)
returns(EndCheckpointResponseProto);

/**
* Get editlog manifests from the active namenode for all the editlogs
*/
rpc getEditLogManifest(GetEditLogManifestRequestProto)
returns(GetEditLogManifestResponseProto);
}
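Because java_generic_services is enabled, protoc should also emit a NamenodeProtocolService class with a nested BlockingInterface; a server-side implementation (assumed here as impl) can then be wrapped in the usual protobuf way:

    // Illustrative only: wrap an assumed BlockingInterface implementation.
    BlockingService service =
        NamenodeProtocolProtos.NamenodeProtocolService.newReflectiveBlockingService(impl);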
@ -53,6 +53,12 @@ message DatanodeIDProto {
required uint32 ipcPort = 4; // the port where the ipc Server is running
}

/**
* DatanodeID array
*/
message DatanodeIDsProto {
repeated DatanodeIDProto datanodes = 1;
}

/**
* The status of a Datanode
@ -76,7 +82,6 @@ message DatanodeInfoProto {
optional AdminState adminState = 10;
}


/**
* Summary of a file or directory
*/
@ -152,10 +157,10 @@ message HdfsFileStatusProto {
required string group = 6;
required uint64 modification_time = 7;
required uint64 access_time = 8;
//

// Optional fields for symlink
optional bytes symlink = 9; // if symlink, target encoded java UTF8
//

// Optional fields for file
optional uint32 block_replication = 10; // Actually a short - only 16bits used
optional uint64 blocksize = 11;
@ -187,5 +192,156 @@ message DirectoryListingProto {
*/
message UpgradeStatusReportProto {
required uint32 version = 1;;
required uint32 upgradeStatus = 2; // Between 0 and 100 indicating the % complete
required uint32 upgradeStatus = 2; // % completed in range 0 & 100
}

/**
* Common node information shared by all the nodes in the cluster
*/
message StorageInfoProto {
required uint32 layoutVersion = 1; // Layout version of the file system
required uint32 namespceID = 2; // File system namespace ID
required string clusterID = 3; // ID of the cluster
required uint64 cTime = 4; // File system creation time
}

/**
* Information sent by a namenode to identify itself to the primary namenode.
*/
message NamenodeRegistrationProto {
required string rpcAddress = 1; // host:port of the namenode RPC address
required string httpAddress = 2; // host:port of the namenode http server
enum NamenodeRoleProto {
NAMENODE = 1;
BACKUP = 2;
CHECKPOINT = 3;
}
required StorageInfoProto storageInfo = 3; // Node information
optional NamenodeRoleProto role = 4; // Namenode role
}

/**
* Unique signature to identify checkpoint transactions.
*/
message CheckpointSignatureProto {
required string blockPoolId = 1;
required uint64 mostRecentCheckpointTxId = 2;
required uint64 curSegmentTxId = 3;
required StorageInfoProto storageInfo = 4;
}

/**
* Command sent from one namenode to another namenode.
*/
message NamenodeCommandProto {
enum Type {
NamenodeCommand = 0; // Base command
CheckPointCommand = 1; // Check point command
}
required uint32 action = 1;
required Type type = 2;
optional CheckpointCommandProto checkpointCmd = 3;
}

/**
* Command returned from primary to checkpointing namenode.
* This command has checkpoint signature that identifies
* checkpoint transaction and is needed for further
* communication related to checkpointing.
*/
message CheckpointCommandProto {
// Unique signature to identify checkpoint transation
required CheckpointSignatureProto signature = 1;

// If true, return transfer image to primary upon the completion of checkpoint
required bool needToReturnImage = 2;
}

/**
* Block information
*/
message BlockProto {
required uint64 blockId = 1;
required uint64 genStamp = 2;
optional uint64 numBytes = 3;
}

/**
* Block and datanodes where is it located
*/
message BlockWithLocationsProto {
required BlockProto block = 1; // Block
repeated DatanodeIDProto datanodeIDs = 2; // Datanodes with replicas of the block
}

/**
* List of block with locations
*/
message BlocksWithLocationsProto {
repeated BlockWithLocationsProto blocks = 1;
}

/**
* Editlog information with available transactions
*/
message RemoteEditLogProto {
required uint64 startTxId = 1; // Starting available edit log transaction
required uint64 endTxId = 2; // Ending available edit log transaction
}

/**
* Enumeration of editlogs available on a remote namenode
*/
message RemoteEditLogManifestProto {
repeated RemoteEditLogProto logs = 1;
}

/**
* Namespace information that describes namespace on a namenode
*/
message NamespaceInfoProto {
required string buildVersion = 1; // Software build version
required uint32 distUpgradeVersion = 2; // Distributed upgrade version
required string blockPoolID = 3; // block pool used by the namespace
required StorageInfoProto storageInfo = 4;// Noe information
}

/**
* Block access token information
*/
message BlockKeyProto {
required uint32 keyId = 1; // Key identifier
required uint64 expiryDate = 2; // Expiry time in milliseconds
required bytes keyBytes = 3; // Key secret
}

/**
* Current key and set of block keys at the namenode.
*/
message ExportedBlockKeysProto {
required bool isBlockTokenEnabled = 1;
required uint64 keyUpdateInterval = 2;
required uint64 tokenLifeTime = 3;
required BlockKeyProto currentKey = 4;
repeated BlockKeyProto allKeys = 5;
}

/**
* State of a block replica at a datanode
*/
enum ReplicaState {
FINALIZED = 0; // State of a replica when it is not modified
RBW = 1; // State of replica that is being written to
RWR = 2; // State of replica that is waiting to be recovered
RUR = 3; // State of replica that is under recovery
TEMPORARY = 4; // State of replica that is created for replication
}

/**
* Block that needs to be recovered with at a given location
*/
message RecoveringBlockProto {
required uint64 newGenStamp = 1; // New genstamp post recovery
required LocatedBlockProto block = 2; // Block to be recovered
}

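A small sketch of the generated builders for the new common messages; the outer class name HdfsProtos is an assumption based on the usual naming for hdfs.proto, and the values are made up:

    // Illustrative only.
    HdfsProtos.BlockProto block = HdfsProtos.BlockProto.newBuilder()
        .setBlockId(1L).setGenStamp(1001L).setNumBytes(4096L).build();
    HdfsProtos.RemoteEditLogProto editLog = HdfsProtos.RemoteEditLogProto.newBuilder()
        .setStartTxId(1L).setEndTxId(100L).build();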
@ -1,41 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

/**
* This aspect takes care about faults injected into datanode.DataXceiver
* class
*/
privileged public aspect DataXceiverAspects {
public static final Log LOG = LogFactory.getLog(DataXceiverAspects.class);

pointcut runXceiverThread(DataXceiver xceiver) :
execution (* run(..)) && target(xceiver);

void around (DataXceiver xceiver) : runXceiverThread(xceiver) {
if ("true".equals(System.getProperty("fi.enabledOOM"))) {
LOG.info("fi.enabledOOM is enabled");
throw new OutOfMemoryError("Pretend there's no more memory");
} else {
proceed(xceiver);
}
}
}
@ -29,6 +29,7 @@
import org.apache.hadoop.fi.DataTransferTestUtil.VerificationAction;
import org.apache.hadoop.fi.FiTestUtil;
import org.apache.hadoop.fi.FiTestUtil.Action;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
@ -56,8 +57,9 @@ public class TestFiDataTransferProtocol {

static private FSDataOutputStream createFile(FileSystem fs, Path p
) throws IOException {
return fs.create(p, true, fs.getConf().getInt("io.file.buffer.size", 4096),
REPLICATION, BLOCKSIZE);
return fs.create(p, true,
fs.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY,
4096), REPLICATION, BLOCKSIZE);
}

{
@ -30,6 +30,7 @@
import org.apache.hadoop.fi.DataTransferTestUtil.SleepAction;
import org.apache.hadoop.fi.DataTransferTestUtil.VerificationAction;
import org.apache.hadoop.fi.FiTestUtil;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
@ -65,8 +66,8 @@ public class TestFiDataTransferProtocol2 {

static private FSDataOutputStream createFile(FileSystem fs, Path p
) throws IOException {
return fs.create(p, true, fs.getConf().getInt("io.file.buffer.size", 4096),
REPLICATION, BLOCKSIZE);
return fs.create(p, true, fs.getConf()
.getInt(IO_FILE_BUFFER_SIZE_KEY, 4096), REPLICATION, BLOCKSIZE);
}

{
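The buffer-size lookups changed above all reduce to the same pattern; a one-line sketch with the named constant (conf is an assumed Configuration instance, 4096 is just the fallback value):

    // Illustrative only.
    int bufferSize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);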
@ -1,97 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;

import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.io.InputStream;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketAddress;
import java.util.concurrent.CountDownLatch;

import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
import org.mockito.Mockito;

/**
* This is a test for DataXceiverServer when DataXceiver thread spawning is
* failed due to OutOfMemoryError. Expected behavior is that DataXceiverServer
* should not be exited. It should retry again after 30 seconds
*/
public class TestFiDataXceiverServer {

@Test(timeout = 30000)
public void testOutOfMemoryErrorInDataXceiverServerRun() throws Exception {
final CountDownLatch latch = new CountDownLatch(1);
ServerSocket sock = new ServerSocket() {
@Override
public Socket accept() throws IOException {
return new Socket() {
@Override
public InetAddress getInetAddress() {
return super.getLocalAddress();
}

@Override
public SocketAddress getRemoteSocketAddress() {
return new InetSocketAddress(8080);
}

@Override
public SocketAddress getLocalSocketAddress() {
return new InetSocketAddress(0);
}

@Override
public synchronized void close() throws IOException {
latch.countDown();
super.close();
}

@Override
public InputStream getInputStream() throws IOException {
return null;
}
};
}
};
Thread thread = null;
System.setProperty("fi.enabledOOM", "true");
DataNode dn = Mockito.mock(DataNode.class);
try {
Configuration conf = new Configuration();
Mockito.doReturn(conf).when(dn).getConf();
dn.shouldRun = true;
DataXceiverServer server = new DataXceiverServer(sock, conf, dn);
thread = new Thread(server);
thread.start();
latch.await();
assertTrue("Not running the thread", thread.isAlive());
} finally {
System.setProperty("fi.enabledOOM", "false");
dn.shouldRun = false;
if (null != thread)
thread.interrupt();
sock.close();
}
}
}
@ -18,7 +18,6 @@
package org.apache.hadoop.cli;

import org.apache.hadoop.cli.util.CLICommandDFSAdmin;
import org.apache.hadoop.cli.util.CLITestCmd;
import org.xml.sax.SAXException;

public class CLITestHelperDFS extends CLITestHelper {
@ -29,7 +29,6 @@
import org.junit.After;
import static org.junit.Assert.assertTrue;
import org.junit.Before;
import org.junit.Test;

public class TestHDFSCLI extends CLITestHelperDFS {

@ -20,6 +20,9 @@
import java.io.*;
import java.net.URI;

import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.log4j.Level;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileContext;
@ -28,9 +31,11 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import static org.apache.hadoop.fs.FileContextTestHelper.*;
import org.apache.hadoop.ipc.RemoteException;

import static org.junit.Assert.*;
import org.junit.Test;
import org.junit.BeforeClass;
@ -41,6 +46,10 @@
*/
public class TestFcHdfsSymlink extends FileContextSymlinkBaseTest {

{
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
}

private static MiniDFSCluster cluster;

protected String getScheme() {
@ -250,8 +259,8 @@ public void testLinkOwner() throws IOException {
Path link = new Path(testBaseDir1(), "symlinkToFile");
createAndWriteFile(file);
fc.createSymlink(file, link, false);
FileStatus stat_file = fc.getFileStatus(file);
FileStatus stat_link = fc.getFileStatus(link);
assertEquals(stat_link.getOwner(), stat_file.getOwner());
FileStatus statFile = fc.getFileStatus(file);
FileStatus statLink = fc.getFileStatus(link);
assertEquals(statLink.getOwner(), statFile.getOwner());
}
}
@ -163,7 +163,7 @@ public void testGeneralSBBehavior() throws IOException, InterruptedException {
try {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
conf.setBoolean("dfs.support.append", true);
conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();

FileSystem hdfs = cluster.getFileSystem();
@ -143,8 +143,8 @@ static byte[] initBuffer(int size) {
public static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
throws IOException {
return fileSys.create(name, true,
fileSys.getConf().getInt("io.file.buffer.size", 4096),
(short) repl, (long) BLOCK_SIZE);
fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short) repl, BLOCK_SIZE);
}

/**
@ -30,6 +30,7 @@
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.net.NetUtils;
@ -148,7 +149,7 @@ public BlockReader getBlockReader(LocatedBlock testBlock, int offset, int lenToR
sock, targetAddr.toString()+ ":" + block.getBlockId(), block,
testBlock.getBlockToken(),
offset, lenToRead,
conf.getInt("io.file.buffer.size", 4096),
conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
true, "");
}

@ -84,6 +84,7 @@ public class DFSTestUtil {

private int maxLevels;// = 3;
private int maxSize;// = 8*1024;
private int minSize = 1;
private int nFiles;
private MyFile[] files;

@ -139,7 +140,7 @@ private class MyFile {
long fidx = -1;
while (fidx < 0) { fidx = gen.nextLong(); }
name = name + Long.toString(fidx);
size = gen.nextInt(maxSize);
size = minSize + gen.nextInt(maxSize - minSize);
seed = gen.nextLong();
}

@ -20,6 +20,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.junit.AfterClass;
@ -66,7 +67,7 @@ public static void startUp () throws IOException {
}

@AfterClass
public static void tearDown() throws IOException {
public static void tearDown() {
cluster.shutdown();
}

@ -91,7 +92,7 @@ public void testAppend() throws IOException {
new Path("foo"+ oldFileLen +"_"+ flushedBytes1 +"_"+ flushedBytes2);
LOG.info("Creating file " + p);
FSDataOutputStream out = fs.create(p, false,
conf.getInt("io.file.buffer.size", 4096),
conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
REPLICATION, BLOCK_SIZE);
out.write(contents, 0, oldFileLen);
out.close();
@ -36,21 +36,22 @@

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.math.stat.descriptive.rank.Min;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol;
import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
@ -323,8 +324,8 @@ public MiniDFSCluster() {
* Servers will be started on free ports.
* <p>
* The caller must manage the creation of NameNode and DataNode directories
* and have already set {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} in the given conf.
* and have already set {@link #DFS_NAMENODE_NAME_DIR_KEY} and
* {@link #DFS_DATANODE_DATA_DIR_KEY} in the given conf.
*
* @param conf the base configuration to use in starting the servers. This
* will be modified as necessary.
@ -398,8 +399,8 @@ public MiniDFSCluster(Configuration conf,
* @param format if true, format the NameNode and DataNodes before starting
* up
* @param manageDfsDirs if true, the data directories for servers will be
* created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in
* created and {@link #DFS_NAMENODE_NAME_DIR_KEY} and
* {@link #DFS_DATANODE_DATA_DIR_KEY} will be set in
* the conf
* @param operation the operation with which to start the servers. If null
* or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@ -430,8 +431,8 @@ public MiniDFSCluster(int nameNodePort,
* @param numDataNodes Number of DataNodes to start; may be zero
* @param format if true, format the NameNode and DataNodes before starting up
* @param manageDfsDirs if true, the data directories for servers will be
* created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in
* created and {@link #DFS_NAMENODE_NAME_DIR_KEY} and
* {@link #DFS_DATANODE_DATA_DIR_KEY} will be set in
* the conf
* @param operation the operation with which to start the servers. If null
* or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@ -464,11 +465,11 @@ public MiniDFSCluster(int nameNodePort,
* @param numDataNodes Number of DataNodes to start; may be zero
* @param format if true, format the NameNode and DataNodes before starting up
* @param manageNameDfsDirs if true, the data directories for servers will be
* created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in
* created and {@link #DFS_NAMENODE_NAME_DIR_KEY} and
* {@link #DFS_DATANODE_DATA_DIR_KEY} will be set in
* the conf
* @param manageDataDfsDirs if true, the data directories for datanodes will
* be created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY}
* be created and {@link #DFS_DATANODE_DATA_DIR_KEY}
* set to same in the conf
* @param operation the operation with which to start the servers. If null
* or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@ -513,6 +514,8 @@ private void initMiniDFSCluster(int nameNodePort, int nameNodeHttpPort,
|
||||
try {
|
||||
Class<?> rpcEngine = conf.getClassByName(rpcEngineName);
|
||||
setRpcEngine(conf, NamenodeProtocols.class, rpcEngine);
|
||||
setRpcEngine(conf, ClientNamenodeWireProtocol.class, rpcEngine);
|
||||
setRpcEngine(conf, ClientDatanodeWireProtocol.class, rpcEngine);
|
||||
setRpcEngine(conf, NamenodeProtocol.class, rpcEngine);
|
||||
setRpcEngine(conf, ClientProtocol.class, rpcEngine);
|
||||
setRpcEngine(conf, DatanodeProtocol.class, rpcEngine);
|
||||
@ -524,15 +527,15 @@ private void initMiniDFSCluster(int nameNodePort, int nameNodeHttpPort,
|
||||
}
|
||||
|
||||
// disable service authorization, as it does not work with tunnelled RPC
|
||||
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
|
||||
conf.setBoolean(HADOOP_SECURITY_AUTHORIZATION,
|
||||
false);
|
||||
}
|
||||
|
||||
int replication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
|
||||
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
|
||||
conf.setClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
|
||||
int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
|
||||
conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
|
||||
conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
|
||||
conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
|
||||
conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
|
||||
StaticMapping.class, DNSToSwitchMapping.class);
|
||||
|
||||
Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
|
||||
@ -540,8 +543,8 @@ private void initMiniDFSCluster(int nameNodePort, int nameNodeHttpPort,
      federation = true;

    if (!federation) {
      conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "127.0.0.1:" + nameNodePort);
      conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:"
      conf.set(FS_DEFAULT_NAME_KEY, "127.0.0.1:" + nameNodePort);
      conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:"
          + nameNodeHttpPort);
      NameNode nn = createNameNode(0, conf, numDataNodes, manageNameDfsDirs,
          format, operation, clusterId);
@ -585,7 +588,7 @@ private static void initFederationConf(Configuration conf,
      initFederatedNamenodeAddress(conf, nameserviceId, nnPort);
      nnPort = nnPort == 0 ? 0 : nnPort + 2;
    }
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, nameserviceIdList);
    conf.set(DFS_FEDERATION_NAMESERVICES, nameserviceIdList);
  }

  /* For federated namenode initialize the address:port */
@ -593,11 +596,11 @@ private static void initFederatedNamenodeAddress(Configuration conf,
      String nameserviceId, int nnPort) {
    // Set nameserviceId specific key
    String key = DFSUtil.addKeySuffixes(
        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId);
        DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId);
    conf.set(key, "127.0.0.1:0");

    key = DFSUtil.addKeySuffixes(
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId);
        DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId);
    conf.set(key, "127.0.0.1:" + nnPort);
  }

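initFederatedNamenodeAddress above derives a per-nameservice key by suffixing the generic key with the nameservice id. A hedged sketch of that construction, assuming only that DFSUtil.addKeySuffixes appends the suffix as it is used in the hunk (the ns1 id and port are demo values):

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class FederatedKeyDemo {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    String nameserviceId = "ns1";   // arbitrary nameservice name for the demo
    // Appends the nameservice id to the generic key, yielding a
    // per-nameservice setting such as dfs.namenode.rpc-address.ns1.
    String key = DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId);
    conf.set(key, "127.0.0.1:8020");
    System.out.println(key + " = " + conf.get(key));
  }
}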
@ -618,10 +621,10 @@ private NameNode createNameNode(int nnIndex, Configuration conf,
      StartupOption operation, String clusterId)
      throws IOException {
    if (manageNameDfsDirs) {
      conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      conf.set(DFS_NAMENODE_NAME_DIR_KEY,
          fileAsURI(new File(base_dir, "name" + (2*nnIndex + 1)))+","+
          fileAsURI(new File(base_dir, "name" + (2*nnIndex + 2))));
      conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      conf.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,
          fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 1)))+","+
          fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 2))));
    }
@ -646,17 +649,17 @@ private void createFederatedNameNode(int nnIndex, Configuration conf,
      int numDataNodes, boolean manageNameDfsDirs, boolean format,
      StartupOption operation, String clusterId, String nameserviceId)
      throws IOException {
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
    conf.set(DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
    NameNode nn = createNameNode(nnIndex, conf, numDataNodes, manageNameDfsDirs,
        format, operation, clusterId);
    conf.set(DFSUtil.addKeySuffixes(
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId), NameNode
        DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId), NameNode
        .getHostPortString(nn.getNameNodeAddress()));
    conf.set(DFSUtil.addKeySuffixes(
        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId), NameNode
        DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId), NameNode
        .getHostPortString(nn.getHttpAddress()));
    DFSUtil.setGenericConf(conf, nameserviceId,
        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
        DFS_NAMENODE_HTTP_ADDRESS_KEY);
    nameNodes[nnIndex] = new NameNodeInfo(nn, new Configuration(conf));
  }

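createNameNode above points the NameNode at two redundant name directories (and two checkpoint directories) by joining file URIs with a comma. A small sketch of the same comma-separated-URIs convention using plain java.io.File; the /tmp paths are placeholders, and fileAsURI in the real code is a test helper that is not assumed here:

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class NameDirConfDemo {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    File dir1 = new File("/tmp/minidfs/name1");   // placeholder paths
    File dir2 = new File("/tmp/minidfs/name2");
    // Two directories, comma separated: the NameNode writes its metadata to both.
    conf.set(DFS_NAMENODE_NAME_DIR_KEY,
        dir1.toURI().toString() + "," + dir2.toURI().toString());
    System.out.println(conf.get(DFS_NAMENODE_NAME_DIR_KEY));
  }
}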
@ -736,7 +739,7 @@ public void waitClusterUp() {
   *          will be modified as necessary.
   * @param numDataNodes Number of DataNodes to start; may be zero
   * @param manageDfsDirs if true, the data directories for DataNodes will be
   *          created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set
   *          created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be set
   *          in the conf
   * @param operation the operation with which to start the DataNodes. If null
   *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@ -768,7 +771,7 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
   *          will be modified as necessary.
   * @param numDataNodes Number of DataNodes to start; may be zero
   * @param manageDfsDirs if true, the data directories for DataNodes will be
   *          created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be
   *          created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be
   *          set in the conf
   * @param operation the operation with which to start the DataNodes. If null
   *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@ -802,7 +805,7 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
   *          will be modified as necessary.
   * @param numDataNodes Number of DataNodes to start; may be zero
   * @param manageDfsDirs if true, the data directories for DataNodes will be
   *          created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be
   *          created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be
   *          set in the conf
   * @param operation the operation with which to start the DataNodes. If null
   *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
|
||||
long[] simulatedCapacities,
|
||||
boolean setupHostsFile,
|
||||
boolean checkDataNodeAddrConfig) throws IOException {
|
||||
conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
|
||||
conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
|
||||
|
||||
int curDatanodesNum = dataNodes.size();
|
||||
// for mincluster's the default initialDelay for BRs is 0
|
||||
if (conf.get(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 0);
|
||||
if (conf.get(DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
|
||||
conf.setLong(DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 0);
|
||||
}
|
||||
// If minicluster's name node is null assume that the conf has been
|
||||
// set with the right address:port of the name node.
|
||||
@ -872,8 +875,8 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
|
||||
+ i + ": " + dir1 + " or " + dir2);
|
||||
}
|
||||
String dirs = fileAsURI(dir1) + "," + fileAsURI(dir2);
|
||||
dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
|
||||
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
|
||||
dnConf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
|
||||
conf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
|
||||
}
|
||||
if (simulatedCapacities != null) {
|
||||
dnConf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
|
||||
@ -902,7 +905,7 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
|
||||
DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf);
|
||||
if(dn == null)
|
||||
throw new IOException("Cannot start DataNode in "
|
||||
+ dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
|
||||
+ dnConf.get(DFS_DATANODE_DATA_DIR_KEY));
|
||||
//since the HDFS does things based on IP:port, we need to add the mapping
|
||||
//for IP:port to rackId
|
||||
String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
|
||||
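The block-report hunk above only forces an initial delay of zero when the caller has not configured one. A hedged, standalone sketch of that conditional-default pattern; loadDefaults is disabled so an unset key really reads back as null in the demo:

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY;

import org.apache.hadoop.conf.Configuration;

public class BlockReportDelayDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false); // skip default resources for the demo
    if (conf.get(DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
      // Same idea as the hunk above: only force an immediate block report
      // when the caller has not asked for a specific delay.
      conf.setLong(DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 0);
    }
    System.out.println(DFS_BLOCKREPORT_INITIAL_DELAY_KEY + " = "
        + conf.get(DFS_BLOCKREPORT_INITIAL_DELAY_KEY));
  }
}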
@ -1318,7 +1321,7 @@ public synchronized boolean restartDataNode(DataNodeProperties dnprop,
    Configuration newconf = new HdfsConfiguration(conf); // save cloned config
    if (keepPort) {
      InetSocketAddress addr = dnprop.datanode.getSelfAddr();
      conf.set("dfs.datanode.address", addr.getAddress().getHostAddress() + ":"
      conf.set(DFS_DATANODE_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":"
          + addr.getPort());
    }
    dataNodes.add(new DataNodeProperties(DataNode.createDataNode(args, conf),
@ -1445,10 +1448,10 @@ public FileSystem getNewFileSystemInstance(int nnIndex) throws IOException {
  /**
   * @return a http URL
   */
  public String getHttpUri(int nnIndex) throws IOException {
  public String getHttpUri(int nnIndex) {
    return "http://"
        + nameNodes[nnIndex].conf
            .get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
            .get(DFS_NAMENODE_HTTP_ADDRESS_KEY);
  }

  /**
@ -1457,7 +1460,7 @@ public String getHttpUri(int nnIndex) throws IOException {
  public HftpFileSystem getHftpFileSystem(int nnIndex) throws IOException {
    String uri = "hftp://"
        + nameNodes[nnIndex].conf
            .get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
            .get(DFS_NAMENODE_HTTP_ADDRESS_KEY);
    try {
      return (HftpFileSystem)FileSystem.get(new URI(uri), conf);
    } catch (URISyntaxException e) {
@ -1907,9 +1910,9 @@ public NameNode addNameNode(Configuration conf, int namenodePort)
    nameNodes = newlist;
    String nameserviceId = NAMESERVICE_ID_PREFIX + (nnIndex + 1);

    String nameserviceIds = conf.get(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES);
    String nameserviceIds = conf.get(DFS_FEDERATION_NAMESERVICES);
    nameserviceIds += "," + nameserviceId;
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, nameserviceIds);
    conf.set(DFS_FEDERATION_NAMESERVICES, nameserviceIds);

    initFederatedNamenodeAddress(conf, nameserviceId, namenodePort);
    createFederatedNameNode(nnIndex, conf, numDataNodes, true, true, null,
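getHttpUri and getHftpFileSystem above both derive their URI from the NameNode HTTP address stored in the per-NameNode Configuration. A hedged sketch of the same string construction against a plain Configuration; the 127.0.0.1:50070 address is only a demo value:

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;

import org.apache.hadoop.conf.Configuration;

public class HttpUriDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:50070"); // demo address
    // Same concatenation as the getHttpUri / getHftpFileSystem hunks above.
    String httpUri = "http://" + conf.get(DFS_NAMENODE_HTTP_ADDRESS_KEY);
    String hftpUri = "hftp://" + conf.get(DFS_NAMENODE_HTTP_ADDRESS_KEY);
    System.out.println(httpUri + "  " + hftpUri);
  }
}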
@ -1942,28 +1945,28 @@ private int getFreeSocketPort() {
  private void setupDatanodeAddress(Configuration conf, boolean setupHostsFile,
                           boolean checkDataNodeAddrConfig) throws IOException {
    if (setupHostsFile) {
      String hostsFile = conf.get(DFSConfigKeys.DFS_HOSTS, "").trim();
      String hostsFile = conf.get(DFS_HOSTS, "").trim();
      if (hostsFile.length() == 0) {
        throw new IOException("Parameter dfs.hosts is not setup in conf");
      }
      // Setup datanode in the include file, if it is defined in the conf
      String address = "127.0.0.1:" + getFreeSocketPort();
      if (checkDataNodeAddrConfig) {
        conf.setIfUnset("dfs.datanode.address", address);
        conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, address);
      } else {
        conf.set("dfs.datanode.address", address);
        conf.set(DFS_DATANODE_ADDRESS_KEY, address);
      }
      addToFile(hostsFile, address);
      LOG.info("Adding datanode " + address + " to hosts file " + hostsFile);
    } else {
      if (checkDataNodeAddrConfig) {
        conf.setIfUnset("dfs.datanode.address", "127.0.0.1:0");
        conf.setIfUnset("dfs.datanode.http.address", "127.0.0.1:0");
        conf.setIfUnset("dfs.datanode.ipc.address", "127.0.0.1:0");
        conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
        conf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
        conf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
      } else {
        conf.set("dfs.datanode.address", "127.0.0.1:0");
        conf.set("dfs.datanode.http.address", "127.0.0.1:0");
        conf.set("dfs.datanode.ipc.address", "127.0.0.1:0");
        conf.set(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
        conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
        conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
      }
    }
  }

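setupDatanodeAddress switches every literal "dfs.datanode.*" string to its DFSConfigKeys constant; the behaviour is unchanged, with setIfUnset respecting a caller-supplied address and set overriding it. A small sketch of the difference between the two calls (demo values only):

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;

import org.apache.hadoop.conf.Configuration;

public class SetIfUnsetDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:50010");      // caller-provided value
    conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");   // no effect: key already set
    System.out.println(conf.get(DFS_DATANODE_ADDRESS_KEY));     // 127.0.0.1:50010
    conf.set(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");          // unconditional override
    System.out.println(conf.get(DFS_DATANODE_ADDRESS_KEY));     // 127.0.0.1:0
  }
}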
@ -25,7 +25,6 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;

 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.DFSConfigKeys;

@ -24,6 +24,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;

 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FSDataOutputStream;
@ -80,8 +81,8 @@ public void testBlockMissingException() throws Exception {
  //
  private void createOldFile(FileSystem fileSys, Path name, int repl, int numBlocks, long blocksize)
    throws IOException {
    FSDataOutputStream stm = fileSys.create(name, true,
                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
                                            (short) repl, blocksize);
    // fill data into file
    final byte[] b = new byte[(int)blocksize];
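This hunk in the testBlockMissingException helper swaps the bare "io.file.buffer.size" string for CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY; the FileSystem.create overload is the one already used above. A hedged sketch against the local file system, with placeholder path and sizes:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BufferSizeKeyDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);          // local FS keeps the demo self-contained
    Path p = new Path("/tmp/buffer-size-demo.txt");     // placeholder path
    int bufferSize =
        conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096); // was "io.file.buffer.size"
    FSDataOutputStream out = fs.create(p, true, bufferSize, (short) 1, 4096L);
    out.writeUTF("hello");
    out.close();
    fs.delete(p, false);
  }
}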
@ -25,7 +25,6 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.io.IOUtils;
@ -41,7 +40,7 @@ public class TestClientProtocolForPipelineRecovery {
  @Test public void testGetNewStamp() throws IOException {
    int numDataNodes = 1;
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean("dfs.support.append", true);
    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    try {
      cluster.waitActive();
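TestClientProtocolForPipelineRecovery now enables append through DFSConfigKeys.DFS_SUPPORT_APPEND_KEY rather than the raw "dfs.support.append" string. A hedged sketch of the same cluster bring-up with the JUnit plumbing and assertions omitted; shutting the cluster down in finally mirrors the test's try block:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class AppendClusterDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true); // was "dfs.support.append"
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive(); // block until the NameNode and DataNode report in
    } finally {
      cluster.shutdown();
    }
  }
}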
@ -28,6 +28,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@ -80,7 +81,7 @@ public void startUpCluster() throws IOException {
        .build();
    cluster.waitActive();
    dfs = (DistributedFileSystem) cluster.getFileSystem();
    buffersize = conf.getInt("io.file.buffer.size", 4096);
    buffersize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
  }

  @After
@ -20,8 +20,6 @@
 import java.net.InetSocketAddress;
 import java.net.Socket;
 import java.io.IOException;
 import java.util.List;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;

@ -31,11 +29,11 @@
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.SocketCache;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

 import org.apache.hadoop.security.token.Token;
 import org.junit.Test;
@ -212,6 +210,7 @@ public void testReadFromOneDN() throws IOException {
    MockGetBlockReader answer = new MockGetBlockReader();
    Mockito.doAnswer(answer).when(in).getBlockReader(
                           (InetSocketAddress) Matchers.anyObject(),
                           (DatanodeInfo) Matchers.anyObject(),
                           Matchers.anyString(),
                           (ExtendedBlock) Matchers.anyObject(),
                           (Token<BlockTokenIdentifier>) Matchers.anyObject(),
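The testReadFromOneDN hunk adds a DatanodeInfo matcher to an existing Mockito stub; once one argument uses a matcher, every argument of the stubbed call must. A hedged, generic sketch of the doAnswer/Matchers pattern, with a made-up BlockReaderSource interface standing in for the HDFS-internal getBlockReader method:

import org.mockito.Matchers;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public class DoAnswerDemo {
  // Hypothetical interface; the real test stubs a method on a spied DFSInputStream.
  interface BlockReaderSource {
    String getBlockReader(String clientName, long blockId);
  }

  public static void main(String[] args) {
    BlockReaderSource source = Mockito.mock(BlockReaderSource.class);
    Answer<String> answer = new Answer<String>() {
      @Override
      public String answer(InvocationOnMock invocation) {
        // Echo the arguments instead of touching a real DataNode.
        return "reader-for-" + invocation.getArguments()[1];
      }
    };
    // Every parameter gets a matcher, exactly as in the hunk above.
    Mockito.doAnswer(answer).when(source)
        .getBlockReader(Matchers.anyString(), Matchers.anyLong());
    System.out.println(source.getBlockReader("client-1", 42L));
  }
}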
@ -26,11 +26,11 @@

 import java.io.IOException;
 import java.util.ArrayList;
 import junit.framework.Assert;
 import junit.framework.TestCase;
 import java.net.InetSocketAddress;

 import org.apache.hadoop.conf.Configuration;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
@ -65,9 +65,9 @@ public void testDFSAddressConfig() throws IOException {
      assertNotNull("Should have been able to stop simulated datanode", dnp);
    }

    conf.unset("dfs.datanode.address");
    conf.unset("dfs.datanode.http.address");
    conf.unset("dfs.datanode.ipc.address");
    conf.unset(DFS_DATANODE_ADDRESS_KEY);
    conf.unset(DFS_DATANODE_HTTP_ADDRESS_KEY);
    conf.unset(DFS_DATANODE_IPC_ADDRESS_KEY);

    cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR,
                           null, null, null, false, true);
@ -90,9 +90,9 @@ public void testDFSAddressConfig() throws IOException {
      assertNotNull("Should have been able to stop simulated datanode", dnp);
    }

    conf.set("dfs.datanode.address","0.0.0.0:0");
    conf.set("dfs.datanode.http.address","0.0.0.0:0");
    conf.set("dfs.datanode.ipc.address","0.0.0.0:0");
    conf.set(DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
    conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");

    cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR,
                           null, null, null, false, true);
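testDFSAddressConfig now statically imports the three DataNode address keys and drives them through conf.unset and conf.set. A compact sketch of that clear-then-rebind cycle outside the test, with demo values and no cluster involved:

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;

import org.apache.hadoop.conf.Configuration;

public class AddressKeysDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set(DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");        // bind to an ephemeral port
    conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
    conf.unset(DFS_DATANODE_ADDRESS_KEY);                    // drop it again, as the test does
    System.out.println(conf.get(DFS_DATANODE_ADDRESS_KEY));  // null once unset
  }
}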
@ -43,6 +43,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileSystem;
@ -58,10 +59,7 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetUtils;
@ -144,7 +142,7 @@ public void testWriteTimeoutAtDataNode() throws IOException,
    conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 1);
    // set a small buffer size
    final int bufferSize = 4096;
    conf.setInt("io.file.buffer.size", bufferSize);
    conf.setInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, bufferSize);

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

@ -22,14 +22,13 @@
 import java.util.Map;
 import java.util.Random;

 import javax.security.auth.login.LoginException;

 import junit.framework.AssertionFailedError;
 import junit.framework.TestCase;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@ -202,7 +201,7 @@ private void create(OpType op, Path name, short umask,
    switch (op) {
    case CREATE:
      FSDataOutputStream out = fs.create(name, permission, true,
          conf.getInt("io.file.buffer.size", 4096),
          conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
          fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
      out.close();
      break;
@ -520,8 +519,7 @@ protected void set(Path path, short ancestorPermission,
    }

    /* Perform an operation and verify if the permission checking is correct */
    void verifyPermission(UserGroupInformation ugi) throws LoginException,
        IOException {
    void verifyPermission(UserGroupInformation ugi) throws IOException {
      if (this.ugi != ugi) {
        setRequiredPermissions(ugi);
        this.ugi = ugi;
@ -564,8 +562,7 @@ protected boolean expectPermissionDeny() {
    }

    /* Set the permissions required to pass the permission checking */
    protected void setRequiredPermissions(UserGroupInformation ugi)
        throws IOException {
    protected void setRequiredPermissions(UserGroupInformation ugi) {
      if (SUPERUSER.equals(ugi)) {
        requiredAncestorPermission = SUPER_MASK;
        requiredParentPermission = SUPER_MASK;
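The create hunk above keeps the seven-argument FileSystem.create call (path, permission, overwrite flag, buffer size, replication, block size, progress) and only swaps the buffer-size literal for the constant. A hedged sketch against the local file system; the path, permission, and sizes are demo choices:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class CreateWithPermissionDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    Path name = new Path("/tmp/perm-demo.txt");          // placeholder path
    FsPermission permission = new FsPermission((short) 0644);
    FSDataOutputStream out = fs.create(name, permission, true,
        conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
        fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
    out.close();
    fs.delete(name, false);
  }
}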
@ -30,7 +30,6 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;

@ -33,7 +33,6 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;

Some files were not shown because too many files have changed in this diff.