diff --git a/hadoop-common-project/hadoop-auth/BUILDING.txt b/hadoop-common-project/hadoop-auth/BUILDING.txt
index cbeaf54767..b81b71cbb3 100644
--- a/hadoop-common-project/hadoop-auth/BUILDING.txt
+++ b/hadoop-common-project/hadoop-auth/BUILDING.txt
@@ -1,20 +1,20 @@
-Build instructions for Hadoop Alfredo
+Build instructions for Hadoop Auth
Same as for Hadoop.
-For more details refer to the Alfredo documentation pages.
+For more details refer to the Hadoop Auth documentation pages.
-----------------------------------------------------------------------------
Caveats:
-* Alfredo has profile to enable Kerberos testcases (testKerberos)
+* Hadoop Auth has a profile to enable Kerberos testcases (testKerberos)
To run Kerberos testcases a KDC, 2 kerberos principals and a keytab file
- are required (refer to the Alfredo documentation pages for details).
+ are required (refer to the Hadoop Auth documentation pages for details).
-* Alfredo does not have a distribution profile (dist)
+* Hadoop Auth does not have a distribution profile (dist)
-* Alfredo does not have a native code profile (native)
+* Hadoop Auth does not have a native code profile (native)
-----------------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/README.txt b/hadoop-common-project/hadoop-auth/README.txt
index a51f6d3586..efa95dd516 100644
--- a/hadoop-common-project/hadoop-auth/README.txt
+++ b/hadoop-common-project/hadoop-auth/README.txt
@@ -1,6 +1,6 @@
-Hadoop Alfredo, Java HTTP SPNEGO
+Hadoop Auth, Java HTTP SPNEGO
-Hadoop Alfredo is a Java library consisting of a client and a server
+Hadoop Auth is a Java library consisting of client and server
components to enable Kerberos SPNEGO authentication for HTTP.
The client component is the AuthenticatedURL class.
@@ -10,6 +10,6 @@ The server component is the AuthenticationFilter servlet filter class.
Authentication mechanisms support is pluggable in both the client and
the server components via interfaces.
-In addition to Kerberos SPNEGO, Alfredo also supports Pseudo/Simple
+In addition to Kerberos SPNEGO, Hadoop Auth also supports Pseudo/Simple
authentication (trusting the value of the query string parameter
'user.name').
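
For a quick sense of how the two components fit together, here is a minimal client sketch; it follows the usage shown on the Examples page later in this patch, and the URL is a placeholder:

    import java.net.HttpURLConnection;
    import java.net.URL;
    import org.apache.hadoop.security.authentication.client.AuthenticatedURL;

    public class WhoClient {
      public static void main(String[] args) throws Exception {
        // Placeholder URL; any resource protected by AuthenticationFilter works.
        URL url = new URL("http://localhost:8080/hadoop-auth-examples/kerberos/who");
        AuthenticatedURL.Token token = new AuthenticatedURL.Token();
        // Performs the SPNEGO (or pseudo) handshake if needed and stores the
        // resulting signed token so follow-up requests can reuse it.
        HttpURLConnection conn = new AuthenticatedURL(url, token).openConnection();
        System.out.println("HTTP status: " + conn.getResponseCode());
      }
    }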
diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml
index 66bdbfb6f6..9bcf629f03 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -21,13 +21,12 @@
 <relativePath>../../hadoop-project</relativePath>
 </parent>
 <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-alfredo</artifactId>
+ <artifactId>hadoop-auth</artifactId>
 <version>0.24.0-SNAPSHOT</version>
 <packaging>jar</packaging>
- <name>Apache Hadoop Alfredo</name>
- <description>Apache Hadoop Alfredo - Java HTTP SPNEGO</description>
- <url>http://hadoop.apache.org/alfredo</url>
+ <name>Apache Hadoop Auth</name>
+ <description>Apache Hadoop Auth - Java HTTP SPNEGO</description>
 <properties>
 <maven.build.timestamp.format>yyyyMMdd</maven.build.timestamp.format>
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/AuthenticatedURL.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
similarity index 97%
rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/AuthenticatedURL.java
rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
index 22a43b8454..5a446609c2 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/AuthenticatedURL.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
@@ -11,9 +11,9 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
-package org.apache.hadoop.alfredo.client;
+package org.apache.hadoop.security.authentication.client;
-import org.apache.hadoop.alfredo.server.AuthenticationFilter;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import java.io.IOException;
import java.net.HttpURLConnection;
@@ -63,7 +63,7 @@ public class AuthenticatedURL {
/**
* Name of the HTTP cookie used for the authentication token between the client and the server.
*/
- public static final String AUTH_COOKIE = "alfredo.auth";
+ public static final String AUTH_COOKIE = "hadoop.auth";
private static final String AUTH_COOKIE_EQ = AUTH_COOKIE + "=";
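
Client code that needs to refer to the cookie by name should use the constant rather than the literal; a small sketch (the cookie value is illustrative):

    // The cookie is now named "hadoop.auth" (previously "alfredo.auth").
    String setCookieHeader = "hadoop.auth=\"u=tucu&t=kerberos&e=...\"";  // illustrative value
    boolean isHadoopAuthCookie =
        setCookieHeader.startsWith(AuthenticatedURL.AUTH_COOKIE + "=");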
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/AuthenticationException.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticationException.java
similarity index 95%
rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/AuthenticationException.java
rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticationException.java
index ba91847665..13632fb1bc 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/AuthenticationException.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticationException.java
@@ -11,7 +11,7 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
-package org.apache.hadoop.alfredo.client;
+package org.apache.hadoop.security.authentication.client;
/**
* Exception thrown when an authentication error occurs.
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/Authenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/Authenticator.java
similarity index 95%
rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/Authenticator.java
rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/Authenticator.java
index 85f5d40530..7b23f20699 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/Authenticator.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/Authenticator.java
@@ -11,7 +11,7 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
-package org.apache.hadoop.alfredo.client;
+package org.apache.hadoop.security.authentication.client;
import java.io.IOException;
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/KerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
similarity index 97%
rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/KerberosAuthenticator.java
rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
index 69a91f5081..b3dc6fe85c 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/KerberosAuthenticator.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
@@ -11,7 +11,7 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
-package org.apache.hadoop.alfredo.client;
+package org.apache.hadoop.security.authentication.client;
import com.sun.security.auth.module.Krb5LoginModule;
import org.apache.commons.codec.binary.Base64;
@@ -48,17 +48,17 @@ public class KerberosAuthenticator implements Authenticator {
/**
* HTTP header used by the SPNEGO server endpoint during an authentication sequence.
*/
- public static String WWW_AUTHENTICATE = "WWW-Authenticate";
+ public static final String WWW_AUTHENTICATE = "WWW-Authenticate";
/**
* HTTP header used by the SPNEGO client endpoint during an authentication sequence.
*/
- public static String AUTHORIZATION = "Authorization";
+ public static final String AUTHORIZATION = "Authorization";
/**
* HTTP header prefix used by the SPNEGO client/server endpoints during an authentication sequence.
*/
- public static String NEGOTIATE = "Negotiate";
+ public static final String NEGOTIATE = "Negotiate";
private static final String AUTH_HTTP_METHOD = "OPTIONS";
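
A hedged sketch of where these header constants appear in the SPNEGO exchange; the URL is a placeholder, and the actual KerberosAuthenticator also builds the client token via JGSS before replying:

    import java.net.HttpURLConnection;
    import java.net.URL;
    import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;

    public class NegotiateProbe {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:8080/hadoop-auth-examples/kerberos/who");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("OPTIONS");  // same method the authenticator uses
        if (conn.getResponseCode() == HttpURLConnection.HTTP_UNAUTHORIZED) {
          String challenge = conn.getHeaderField(KerberosAuthenticator.WWW_AUTHENTICATE);
          if (challenge != null && challenge.startsWith(KerberosAuthenticator.NEGOTIATE)) {
            // The server answered "WWW-Authenticate: Negotiate"; a real client would
            // now send "Authorization: Negotiate <base64 Kerberos token>" and repeat.
            System.out.println("SPNEGO challenge received");
          }
        }
      }
    }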
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/PseudoAuthenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java
similarity index 97%
rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/PseudoAuthenticator.java
rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java
index fb7991d64f..dff7a31003 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/PseudoAuthenticator.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java
@@ -11,7 +11,7 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
-package org.apache.hadoop.alfredo.client;
+package org.apache.hadoop.security.authentication.client;
import java.io.IOException;
import java.net.HttpURLConnection;
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/AuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
similarity index 95%
rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/AuthenticationFilter.java
rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
index 2b39d7ee59..f7305d0282 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/AuthenticationFilter.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
@@ -11,12 +11,12 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
-package org.apache.hadoop.alfredo.server;
+package org.apache.hadoop.security.authentication.server;
-import org.apache.hadoop.alfredo.client.AuthenticatedURL;
-import org.apache.hadoop.alfredo.client.AuthenticationException;
-import org.apache.hadoop.alfredo.util.Signer;
-import org.apache.hadoop.alfredo.util.SignerException;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.util.Signer;
+import org.apache.hadoop.security.authentication.util.SignerException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -103,6 +103,8 @@ public class AuthenticationFilter implements Filter {
*/
public static final String COOKIE_PATH = "cookie.path";
+ private static final Random RAN = new Random();
+
private Signer signer;
private AuthenticationHandler authHandler;
private boolean randomSecret;
@@ -139,7 +141,7 @@ public void init(FilterConfig filterConfig) throws ServletException {
}
try {
- Class klass = Thread.currentThread().getContextClassLoader().loadClass(authHandlerClassName);
+ Class<?> klass = Thread.currentThread().getContextClassLoader().loadClass(authHandlerClassName);
authHandler = (AuthenticationHandler) klass.newInstance();
authHandler.init(config);
} catch (ClassNotFoundException ex) {
@@ -151,7 +153,7 @@ public void init(FilterConfig filterConfig) throws ServletException {
}
String signatureSecret = config.getProperty(configPrefix + SIGNATURE_SECRET);
if (signatureSecret == null) {
- signatureSecret = Long.toString(new Random(System.currentTimeMillis()).nextLong());
+ signatureSecret = Long.toString(RAN.nextLong());
randomSecret = true;
LOG.warn("'signature.secret' configuration not set, using a random value as secret");
}
@@ -237,7 +239,7 @@ public void destroy() {
*/
protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig) throws ServletException {
Properties props = new Properties();
- Enumeration names = filterConfig.getInitParameterNames();
+ Enumeration<?> names = filterConfig.getInitParameterNames();
while (names.hasMoreElements()) {
String name = (String) names.nextElement();
if (name.startsWith(configPrefix)) {
@@ -381,7 +383,7 @@ public Principal getUserPrincipal() {
}
/**
- * Creates the Alfredo authentiation HTTP cookie.
+ * Creates the Hadoop authentication HTTP cookie.
*
* It sets the domain and path specified in the configuration.
*
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/AuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java
similarity index 96%
rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/AuthenticationHandler.java
rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java
index e79c938699..958680fcad 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/AuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java
@@ -11,9 +11,9 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
-package org.apache.hadoop.alfredo.server;
+package org.apache.hadoop.security.authentication.server;
-import org.apache.hadoop.alfredo.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
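
Since authentication mechanisms are pluggable through this interface, a skeleton of a custom handler may help; the method set (init/getType/authenticate/destroy) is an assumption taken from the Hadoop Auth sources, so treat it as a sketch rather than the definitive contract:

    import java.io.IOException;
    import java.util.Properties;
    import javax.servlet.ServletException;
    import javax.servlet.http.HttpServletRequest;
    import javax.servlet.http.HttpServletResponse;
    import org.apache.hadoop.security.authentication.client.AuthenticationException;
    import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
    import org.apache.hadoop.security.authentication.server.AuthenticationToken;

    public class MyAuthenticationHandler implements AuthenticationHandler {
      public static final String TYPE = "my-auth";  // value referenced by the 'type' config

      @Override
      public void init(Properties config) throws ServletException {
        // read handler-specific properties here
      }

      @Override
      public String getType() {
        return TYPE;
      }

      @Override
      public AuthenticationToken authenticate(HttpServletRequest request,
                                              HttpServletResponse response)
          throws IOException, AuthenticationException {
        // Inspect the request, challenge the client if needed, and on success
        // return a token (user, principal, type) for the filter to sign.
        // Returning null signals that a multi-step handshake is still in progress.
        return null;
      }

      @Override
      public void destroy() {
      }
    }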
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/AuthenticationToken.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
similarity index 98%
rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/AuthenticationToken.java
rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
index 0ae9947a8f..fd17249ce6 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/AuthenticationToken.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
@@ -11,9 +11,9 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
-package org.apache.hadoop.alfredo.server;
+package org.apache.hadoop.security.authentication.server;
-import org.apache.hadoop.alfredo.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
import java.security.Principal;
import java.util.Arrays;
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/KerberosAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
similarity index 97%
rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/KerberosAuthenticationHandler.java
rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
index ee985d9cdd..121d96628b 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/KerberosAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
@@ -11,13 +11,13 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
-package org.apache.hadoop.alfredo.server;
+package org.apache.hadoop.security.authentication.server;
-import org.apache.hadoop.alfredo.client.AuthenticationException;
-import org.apache.hadoop.alfredo.client.KerberosAuthenticator;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import com.sun.security.auth.module.Krb5LoginModule;
import org.apache.commons.codec.binary.Base64;
-import org.apache.hadoop.alfredo.util.KerberosName;
+import org.apache.hadoop.security.authentication.util.KerberosName;
import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSCredential;
import org.ietf.jgss.GSSManager;
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/PseudoAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java
similarity index 95%
rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/PseudoAuthenticationHandler.java
rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java
index 4783c00822..f23b2d0381 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/PseudoAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java
@@ -11,10 +11,10 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
-package org.apache.hadoop.alfredo.server;
+package org.apache.hadoop.security.authentication.server;
-import org.apache.hadoop.alfredo.client.AuthenticationException;
-import org.apache.hadoop.alfredo.client.PseudoAuthenticator;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/util/KerberosName.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
similarity index 99%
rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/util/KerberosName.java
rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
index 7d68e8cf20..6a7ae0e412 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/util/KerberosName.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
@@ -1,4 +1,4 @@
-package org.apache.hadoop.alfredo.util;
+package org.apache.hadoop.security.authentication.util;
/**
* Licensed to the Apache Software Foundation (ASF) under one
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/util/Signer.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
similarity index 98%
rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/util/Signer.java
rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
index aba73cbaee..10c9a8e238 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/util/Signer.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
@@ -11,7 +11,7 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
-package org.apache.hadoop.alfredo.util;
+package org.apache.hadoop.security.authentication.util;
import org.apache.commons.codec.binary.Base64;
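
A short usage sketch for the renamed signing utility; it assumes the byte[] secret constructor and the sign/verifyAndExtract methods found in the Hadoop Auth sources:

    import org.apache.hadoop.security.authentication.util.Signer;
    import org.apache.hadoop.security.authentication.util.SignerException;

    public class SignerSketch {
      public static void main(String[] args) {
        Signer signer = new Signer("my-shared-secret".getBytes());
        String signed = signer.sign("u=tucu&t=simple");  // original string plus appended signature
        try {
          // Returns the original string if the signature checks out.
          System.out.println(signer.verifyAndExtract(signed));
        } catch (SignerException ex) {
          // Raised when the signature does not match, e.g. a tampered cookie or a
          // different secret on the verifying side.
        }
      }
    }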
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/util/SignerException.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/SignerException.java
similarity index 94%
rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/util/SignerException.java
rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/SignerException.java
index 7bab225cf0..faf2007b0b 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/util/SignerException.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/SignerException.java
@@ -11,7 +11,7 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
-package org.apache.hadoop.alfredo.util;
+package org.apache.hadoop.security.authentication.util;
/**
* Exception thrown by {@link Signer} when a string signature is invalid.
diff --git a/hadoop-common-project/hadoop-auth/src/site/apt/BuildingIt.apt.vm b/hadoop-common-project/hadoop-auth/src/site/apt/BuildingIt.apt.vm
index 32d09d7c43..a2e015ae0c 100644
--- a/hadoop-common-project/hadoop-auth/src/site/apt/BuildingIt.apt.vm
+++ b/hadoop-common-project/hadoop-auth/src/site/apt/BuildingIt.apt.vm
@@ -11,12 +11,12 @@
~~ limitations under the License. See accompanying LICENSE file.
---
- Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Building It
+ Hadoop Auth, Java HTTP SPNEGO ${project.version} - Building It
---
---
${maven.build.timestamp}
-Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Building It
+Hadoop Auth, Java HTTP SPNEGO ${project.version} - Building It
\[ {{{./index.html}Go Back}} \]
@@ -50,14 +50,14 @@ $ mvn test -PtestKerberos
The following Maven <<<-D>>> options can be used to change the default
values:
- * <<<alfredo.test.kerberos.realm>>>: default value <<<LOCALHOST>>>
+ * <<<hadoop-auth.test.kerberos.realm>>>: default value <<<LOCALHOST>>>
- * <<<alfredo.test.kerberos.client.principal>>>: default value <<<client>>>
+ * <<<hadoop-auth.test.kerberos.client.principal>>>: default value <<<client>>>
- * <<<alfredo.test.kerberos.server.principal>>>: default value
+ * <<<hadoop-auth.test.kerberos.server.principal>>>: default value
 <<<HTTP/localhost>>> (it must start 'HTTP/')
- * <<<alfredo.test.kerberos.keytab.file>>>: default value
+ * <<<hadoop-auth.test.kerberos.keytab.file>>>: default value
 <<<${HOME}/${USER}.keytab>>>
** Generating Documentation
@@ -69,7 +69,7 @@ $ mvn package -Pdocs
+---+
The generated documentation is available at
- <<>>.
+ <<>>.
\[ {{{./index.html}Go Back}} \]
diff --git a/hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm b/hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm
index d4d18151c3..e42ee8b4c3 100644
--- a/hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm
+++ b/hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm
@@ -11,30 +11,30 @@
~~ limitations under the License. See accompanying LICENSE file.
---
- Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Server Side
+ Hadoop Auth, Java HTTP SPNEGO ${project.version} - Server Side
Configuration
---
---
${maven.build.timestamp}
-Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Server Side
+Hadoop Auth, Java HTTP SPNEGO ${project.version} - Server Side
Configuration
\[ {{{./index.html}Go Back}} \]
* Server Side Configuration Setup
- The {{{./apidocs/org/apache/hadoop/alfredo/server/AuthenticationFilter.html}
- AuthenticationFilter filter}} is Alfredo's server side component.
+ The {{{./apidocs/org/apache/hadoop/security/authentication/server/AuthenticationFilter.html}
+ AuthenticationFilter filter}} is Hadoop Auth's server side component.
This filter must be configured in front of all the web application resources
that required authenticated requests. For example:
- The Alfredo and dependent JAR files must be in the web application classpath
- (commonly the <<<WEB-INF/lib>>> directory).
+ The Hadoop Auth and dependent JAR files must be in the web application
+ classpath (commonly the <<<WEB-INF/lib>>> directory).
- Alfredo uses SLF4J-API for logging. Alfredo Maven POM dependencies define the
- SLF4J API dependency but it does not define the dependency on a concrete
+ Hadoop Auth uses SLF4J-API for logging. The Hadoop Auth Maven POM dependencies
+ define the SLF4J API dependency but do not define the dependency on a concrete
 logging implementation; this must be added explicitly to the web
 application. For example, if the web application uses Log4j, the
 SLF4J-LOG4J12 and LOG4J JAR files must be part of the web application
@@ -47,7 +47,7 @@ Configuration
* <<<[PREFIX.]type>>>: the authentication type keyword (<<<simple>>> or
 <<<kerberos>>>) or a
- {{{./apidocs/org/apache/hadoop/alfredo/server/AuthenticationHandler.html}
+ {{{./apidocs/org/apache/hadoop/security/authentication/server/AuthenticationHandler.html}
Authentication handler implementation}}.
* <<<[PREFIX.]signature.secret>>>: The secret to SHA-sign the generated
@@ -80,7 +80,7 @@ Configuration
* <<<[PREFIX.]kerberos.keytab>>>: The path to the keytab file containing
the credentials for the kerberos principal. For example:
- <<>>. There is no default value.
+ <<>>. There is no default value.
<>:
@@ -90,7 +90,7 @@ Configuration
 <filter-name>kerberosFilter</filter-name>
- <filter-class>org.apache.hadoop.alfredo.server.AuthenticationFilter</filter-class>
+ <filter-class>org.apache.hadoop.security.authentication.server.AuthenticationFilter</filter-class>
 <init-param>
 <param-name>type</param-name>
 <param-value>kerberos</param-value>
@@ -113,7 +113,7 @@ Configuration
 <param-name>kerberos.keytab</param-name>
- <param-value>/tmp/alfredo.keytab</param-value>
+ <param-value>/tmp/auth.keytab</param-value>
@@ -146,7 +146,7 @@ Configuration
 <filter-name>simpleFilter</filter-name>
- <filter-class>org.apache.hadoop.alfredo.server.AuthenticationFilter</filter-class>
+ <filter-class>org.apache.hadoop.security.authentication.server.AuthenticationFilter</filter-class>
 <init-param>
 <param-name>type</param-name>
 <param-value>simple</param-value>
diff --git a/hadoop-common-project/hadoop-auth/src/site/apt/Examples.apt.vm b/hadoop-common-project/hadoop-auth/src/site/apt/Examples.apt.vm
index d17b7e8a98..7070862d9e 100644
--- a/hadoop-common-project/hadoop-auth/src/site/apt/Examples.apt.vm
+++ b/hadoop-common-project/hadoop-auth/src/site/apt/Examples.apt.vm
@@ -11,16 +11,16 @@
~~ limitations under the License. See accompanying LICENSE file.
---
- Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Examples
+ Hadoop Auth, Java HTTP SPNEGO ${project.version} - Examples
---
---
${maven.build.timestamp}
-Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Examples
+Hadoop Auth, Java HTTP SPNEGO ${project.version} - Examples
\[ {{{./index.html}Go Back}} \]
-* Accessing a Alfredo protected URL Using a browser
+* Accessing a Hadoop Auth protected URL Using a browser
<> The browser must support HTTP Kerberos SPNEGO. For example,
Firefox or Internet Explorer.
@@ -31,7 +31,7 @@ Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Examples
the domain of the web server that is HTTP Kerberos SPNEGO protected (if using
multiple domains and hostname use comma to separate them).
-* Accessing a Alfredo protected URL Using <<<curl>>>
+* Accessing a Hadoop Auth protected URL Using <<<curl>>>
<> The <<<curl>>> version must support GSS, run <<<curl -V>>>.
@@ -48,10 +48,10 @@ Features: GSS-Negotiate IPv6 Largefile NTLM SSL libz
+---+
$ kinit
Please enter the password for tucu@LOCALHOST:
-$ curl --negotiate -u foo -b ~/cookiejar.txt -c ~/cookiejar.txt http://localhost:8080/alfredo-examples/kerberos/who
+$ curl --negotiate -u foo -b ~/cookiejar.txt -c ~/cookiejar.txt http://localhost:8080/hadoop-auth-examples/kerberos/who
Enter host password for user 'tucu':
-Hello Alfredo!
+Hello Hadoop Auth Examples!
+---+
* The <<<--negotiate>>> option enables SPNEGO in <<<curl>>>.
@@ -68,7 +68,7 @@ Hello Alfredo!
+---+
...
-URL url = new URL("http://localhost:8080/alfredo/kerberos/who");
+URL url = new URL("http://localhost:8080/hadoop-auth/kerberos/who");
AuthenticatedURL.Token token = new AuthenticatedURL.Token();
...
HttpURLConnection conn = new AuthenticatedURL(url, token).openConnection();
@@ -79,12 +79,12 @@ conn = new AuthenticatedURL(url, token).openConnection();
* Building and Running the Examples
- Download Alfredo's source code, the examples are in the
+ Download Hadoop Auth's source code; the examples are in the
<<<examples>>> directory.
** Server Example:
- Edit the <<>> and set the
+ Edit the <<>> and set the
right configuration init parameters for the <<>>
definition configured for Kerberos (the right Kerberos principal and keytab
file must be specified). Refer to the {{{./Configuration.html}Configuration
@@ -106,11 +106,11 @@ conn = new AuthenticatedURL(url, token).openConnection();
$ kinit
Please enter the password for tucu@LOCALHOST:
-$ curl http://localhost:8080/alfredo-examples/anonymous/who
+$ curl http://localhost:8080/hadoop-auth-examples/anonymous/who
-$ curl http://localhost:8080/alfredo-examples/simple/who?user.name=foo
+$ curl http://localhost:8080/hadoop-auth-examples/simple/who?user.name=foo
-$ curl --negotiate -u foo -b ~/cookiejar.txt -c ~/cookiejar.txt http://localhost:8080/alfredo-examples/kerberos/who
+$ curl --negotiate -u foo -b ~/cookiejar.txt -c ~/cookiejar.txt http://localhost:8080/hadoop-auth-examples/kerberos/who
+---+
** Accessing the server using the Java client example
@@ -121,7 +121,7 @@ Please enter the password for tucu@LOCALHOST:
$ cd examples
-$ mvn exec:java -Durl=http://localhost:8080/alfredo-examples/kerberos/who
+$ mvn exec:java -Durl=http://localhost:8080/hadoop-auth-examples/kerberos/who
....
diff --git a/hadoop-common-project/hadoop-auth/src/site/apt/index.apt.vm b/hadoop-common-project/hadoop-auth/src/site/apt/index.apt.vm
index d070ff92b2..a2e7b5e915 100644
--- a/hadoop-common-project/hadoop-auth/src/site/apt/index.apt.vm
+++ b/hadoop-common-project/hadoop-auth/src/site/apt/index.apt.vm
@@ -11,27 +11,27 @@
~~ limitations under the License. See accompanying LICENSE file.
---
- Hadoop Alfredo, Java HTTP SPNEGO ${project.version}
+ Hadoop Auth, Java HTTP SPNEGO ${project.version}
---
---
${maven.build.timestamp}
-Hadoop Alfredo, Java HTTP SPNEGO ${project.version}
+Hadoop Auth, Java HTTP SPNEGO ${project.version}
- Hadoop Alfredo is a Java library consisting of a client and a server
+ Hadoop Auth is a Java library consisting of client and server
components to enable Kerberos SPNEGO authentication for HTTP.
- Alfredo also supports additional authentication mechanisms on the client
+ Hadoop Auth also supports additional authentication mechanisms on the client
and the server side via 2 simple interfaces.
* License
- Alfredo is distributed under {{{http://www.apache.org/licenses/}Apache
+ Hadoop Auth is distributed under {{{http://www.apache.org/licenses/}Apache
License 2.0}}.
-* How Does Alfredo Works?
+* How Does Hadoop Auth Work?
- Alfredo enforces authentication on protected resources, once authentiation
+ Hadoop Auth enforces authentication on protected resources; once authentication
has been established it sets a signed HTTP Cookie that contains an
authentication token with the user name, user principal, authentication type
and expiration time.
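
To make the cookie contents concrete, a hedged sketch of building the token the filter signs; the constructor argument order (user name, principal, type) and setExpires follow the Hadoop Auth sources, and the values are illustrative:

    AuthenticationToken token =
        new AuthenticationToken("tucu", "tucu@LOCALHOST", "kerberos");
    // Expiration is carried inside the token; 10 hours here is only an example.
    token.setExpires(System.currentTimeMillis() + 10 * 60 * 60 * 1000);
    // The filter signs the token's string form with the configured secret and
    // sets it as the value of the "hadoop.auth" cookie.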
diff --git a/hadoop-common-project/hadoop-auth/src/site/site.xml b/hadoop-common-project/hadoop-auth/src/site/site.xml
index 483581dc9f..4fab0f0e1d 100644
--- a/hadoop-common-project/hadoop-auth/src/site/site.xml
+++ b/hadoop-common-project/hadoop-auth/src/site/site.xml
@@ -11,7 +11,7 @@
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
-<project name="Hadoop Alfredo">
+<project name="Hadoop Auth">
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/KerberosTestUtils.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java
similarity index 97%
rename from hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/KerberosTestUtils.java
rename to hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java
index ae720dbb79..92e1de5a26 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/KerberosTestUtils.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java
@@ -11,7 +11,7 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
-package org.apache.hadoop.alfredo;
+package org.apache.hadoop.security.authentication;
import com.sun.security.auth.module.Krb5LoginModule;
@@ -34,7 +34,7 @@
* Test helper class for Java Kerberos setup.
*/
public class KerberosTestUtils {
- private static final String PREFIX = "alfredo.test.";
+ private static final String PREFIX = "hadoop-auth.test.";
public static final String REALM = PREFIX + "kerberos.realm";
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/AuthenticatorTestCase.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
similarity index 96%
rename from hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/AuthenticatorTestCase.java
rename to hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
index c139fa5902..93c519808f 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/AuthenticatorTestCase.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
@@ -11,9 +11,9 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
-package org.apache.hadoop.alfredo.client;
+package org.apache.hadoop.security.authentication.client;
-import org.apache.hadoop.alfredo.server.AuthenticationFilter;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import junit.framework.TestCase;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
@@ -57,6 +57,7 @@ protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig
}
}
+ @SuppressWarnings("serial")
public static class TestServlet extends HttpServlet {
@Override
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/TestAuthenticatedURL.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java
similarity index 98%
rename from hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/TestAuthenticatedURL.java
rename to hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java
index f082fadfc8..525af62606 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/TestAuthenticatedURL.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java
@@ -11,7 +11,7 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
-package org.apache.hadoop.alfredo.client;
+package org.apache.hadoop.security.authentication.client;
import junit.framework.TestCase;
import org.mockito.Mockito;
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/TestKerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
similarity index 88%
rename from hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/TestKerberosAuthenticator.java
rename to hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
index 2fdb9bc253..f086870ee1 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/TestKerberosAuthenticator.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
@@ -11,12 +11,12 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
-package org.apache.hadoop.alfredo.client;
+package org.apache.hadoop.security.authentication.client;
-import org.apache.hadoop.alfredo.KerberosTestUtils;
-import org.apache.hadoop.alfredo.server.AuthenticationFilter;
-import org.apache.hadoop.alfredo.server.PseudoAuthenticationHandler;
-import org.apache.hadoop.alfredo.server.KerberosAuthenticationHandler;
+import org.apache.hadoop.security.authentication.KerberosTestUtils;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import java.net.HttpURLConnection;
import java.net.URL;
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/TestPseudoAuthenticator.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java
similarity index 93%
rename from hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/TestPseudoAuthenticator.java
rename to hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java
index 5d151c2337..807052e848 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/TestPseudoAuthenticator.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java
@@ -11,10 +11,10 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
-package org.apache.hadoop.alfredo.client;
+package org.apache.hadoop.security.authentication.client;
-import org.apache.hadoop.alfredo.server.AuthenticationFilter;
-import org.apache.hadoop.alfredo.server.PseudoAuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
import java.net.HttpURLConnection;
import java.net.URL;
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestAuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
similarity index 98%
rename from hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestAuthenticationFilter.java
rename to hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
index e450a5603f..415600e97e 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestAuthenticationFilter.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
@@ -11,11 +11,11 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
-package org.apache.hadoop.alfredo.server;
+package org.apache.hadoop.security.authentication.server;
-import org.apache.hadoop.alfredo.client.AuthenticatedURL;
-import org.apache.hadoop.alfredo.client.AuthenticationException;
-import org.apache.hadoop.alfredo.util.Signer;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.util.Signer;
import junit.framework.TestCase;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestAuthenticationToken.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationToken.java
similarity index 96%
rename from hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestAuthenticationToken.java
rename to hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationToken.java
index 1c29a3364d..25f9100217 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestAuthenticationToken.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationToken.java
@@ -11,9 +11,9 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
-package org.apache.hadoop.alfredo.server;
+package org.apache.hadoop.security.authentication.server;
-import org.apache.hadoop.alfredo.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
import junit.framework.TestCase;
public class TestAuthenticationToken extends TestCase {
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestKerberosAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java
similarity index 95%
rename from hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestKerberosAuthenticationHandler.java
rename to hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java
index 3089d1a659..8187c9ec66 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestKerberosAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java
@@ -11,11 +11,11 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
-package org.apache.hadoop.alfredo.server;
+package org.apache.hadoop.security.authentication.server;
-import org.apache.hadoop.alfredo.KerberosTestUtils;
-import org.apache.hadoop.alfredo.client.AuthenticationException;
-import org.apache.hadoop.alfredo.client.KerberosAuthenticator;
+import org.apache.hadoop.security.authentication.KerberosTestUtils;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import junit.framework.TestCase;
import org.apache.commons.codec.binary.Base64;
import org.ietf.jgss.GSSContext;
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestPseudoAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java
similarity index 94%
rename from hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestPseudoAuthenticationHandler.java
rename to hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java
index 3a05bd435d..dbc2c36833 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestPseudoAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java
@@ -11,11 +11,11 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
-package org.apache.hadoop.alfredo.server;
+package org.apache.hadoop.security.authentication.server;
-import org.apache.hadoop.alfredo.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
import junit.framework.TestCase;
-import org.apache.hadoop.alfredo.client.PseudoAuthenticator;
+import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
import org.mockito.Mockito;
import javax.servlet.http.HttpServletRequest;
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/util/TestKerberosName.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java
similarity index 95%
rename from hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/util/TestKerberosName.java
rename to hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java
index 16a15aa647..b6c0b0fb2e 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/util/TestKerberosName.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java
@@ -1,4 +1,4 @@
-package org.apache.hadoop.alfredo.util;
+package org.apache.hadoop.security.authentication.util;
/**
* Licensed to the Apache Software Foundation (ASF) under one
@@ -20,7 +20,7 @@
import java.io.IOException;
-import org.apache.hadoop.alfredo.KerberosTestUtils;
+import org.apache.hadoop.security.authentication.KerberosTestUtils;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/util/TestSigner.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java
similarity index 97%
rename from hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/util/TestSigner.java
rename to hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java
index c0236ba7c4..9b3d1a2a2a 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/util/TestSigner.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java
@@ -11,7 +11,7 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
-package org.apache.hadoop.alfredo.util;
+package org.apache.hadoop.security.authentication.util;
import junit.framework.TestCase;
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 73fc8f82a8..d62fa2db3a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -2,6 +2,16 @@ Hadoop Change Log
Trunk (unreleased changes)
+ IMPROVEMENTS
+
+ HADOOP-7595. Upgrade dependency to Avro 1.5.3. (Alejandro Abdelnur via atm)
+ HADOOP-7524. Change RPC to allow multiple protocols including multiple versions of the same protocol (Sanjay Radia)
+
+ BUGS
+
+ HADOOP-7606. Upgrade Jackson to version 1.7.1 to match the version required
+ by Jersey (Alejandro Abdelnur via atm)
+
Release 0.23.0 - Unreleased
INCOMPATIBLE CHANGES
@@ -347,6 +357,20 @@ Release 0.23.0 - Unreleased
HADOOP-7547. Add generic type in WritableComparable subclasses.
(Uma Maheswara Rao G via szetszwo)
+ HADOOP-7579. Rename package names from alfredo to auth.
+ (Alejandro Abdelnur via szetszwo)
+
+ HADOOP-7594. Support HTTP REST in HttpServer. (szetszwo)
+
+ HADOOP-7552. FileUtil#fullyDelete doesn't throw IOE but lists it
+ in the throws clause. (eli)
+
+ HADOOP-7580. Add a version of getLocalPathForWrite to LocalDirAllocator
+ which doesn't create dirs. (Chris Douglas & Siddharth Seth via acmurthy)
+
+ HADOOP-7507. Allow ganglia metrics to include the metrics system tags
+ in the gmetric names. (Alejandro Abdelnur via todd)
+
OPTIMIZATIONS
HADOOP-7333. Performance improvement in PureJavaCrc32. (Eric Caspole
@@ -533,6 +557,12 @@ Release 0.23.0 - Unreleased
HADOOP-7560. Change src layout to be hierarchical. (Alejandro Abdelnur
via acmurthy)
+ HADOOP-7576. Fix findbugs warnings and javac warnings in hadoop-auth.
+ (szetszwo)
+
+ HADOOP-7593. Fix AssertionError in TestHttpServer.testMaxThreads().
+ (Uma Maheswara Rao G via szetszwo)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 1dbc2a908f..6c1c00edac 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -92,6 +92,28 @@
 <artifactId>jetty-util</artifactId>
 <scope>compile</scope>
 </dependency>
+ <dependency>
+ <groupId>asm</groupId>
+ <artifactId>asm</artifactId>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.sun.jersey</groupId>
+ <artifactId>jersey-core</artifactId>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.sun.jersey</groupId>
+ <artifactId>jersey-json</artifactId>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.sun.jersey</groupId>
+ <artifactId>jersey-server</artifactId>
+ <scope>compile</scope>
+ </dependency>
 <dependency>
 <groupId>tomcat</groupId>
 <artifactId>jasper-compiler</artifactId>
@@ -239,7 +261,7 @@
 <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-alfredo</artifactId>
+ <artifactId>hadoop-auth</artifactId>
 <scope>compile</scope>
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties
index a6e2f0b16f..f2826b69a6 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties
@@ -43,6 +43,16 @@
#*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
#*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+# Tag values to use for the ganglia prefix. If not defined no tags are used.
+# If '*' all tags are used. If specifying multiple tags, separate them with
+# commas. Note that the last segment of the property name is the context name.
+#
+#*.sink.ganglia.tagsForPrefix.jvm=ProcessName
+#*.sink.ganglia.tagsForPrefix.dfs=
+#*.sink.ganglia.tagsForPrefix.rpc=
+#*.sink.ganglia.tagsForPrefix.mapred=
+#*.sink.ganglia.tagsForPrefix.fairscheduler=
+
#namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
#datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index e3338c783a..ede0c93480 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -149,3 +149,25 @@ log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
#log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
#log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
#log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# Yarn ResourceManager Application Summary Log
+#
+# Set the ResourceManager summary log filename
+#yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
+# Set the ResourceManager summary log level and appender
+#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+
+# Appender for ResourceManager Application Summary Log - rolled daily
+# Requires the following properties to be set
+# - hadoop.log.dir (Hadoop Log directory)
+# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+
+#log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+#log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+#log4j.appender.RMSUMMARY=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+#log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+#log4j.appender.RMSUMMARY.DatePattern=.yyyy-MM-dd
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 023ec69cdb..8e7aa302a6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -28,7 +28,6 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
@@ -88,7 +87,7 @@ public static Path[] stat2Paths(FileStatus[] stats, Path path) {
* (4) If dir is a normal directory, then dir and all its contents recursively
* are deleted.
*/
- public static boolean fullyDelete(File dir) throws IOException {
+ public static boolean fullyDelete(File dir) {
if (dir.delete()) {
// dir is (a) normal file, (b) symlink to a file, (c) empty directory or
// (d) symlink to a directory
@@ -108,7 +107,7 @@ public static boolean fullyDelete(File dir) throws IOException {
* If dir is a symlink to a directory, all the contents of the actual
* directory pointed to by dir will be deleted.
*/
- public static boolean fullyDeleteContents(File dir) throws IOException {
+ public static boolean fullyDeleteContents(File dir) {
boolean deletionSucceeded = true;
File contents[] = dir.listFiles();
if (contents != null) {
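
With the throws clause removed, callers check the boolean result instead of catching IOException; a brief sketch (the path is a placeholder):

    import java.io.File;
    import org.apache.hadoop.fs.FileUtil;

    public class DeleteSketch {
      public static void main(String[] args) {
        File dir = new File("/tmp/scratch-dir");  // placeholder path
        if (!FileUtil.fullyDelete(dir)) {
          // Failures are now reported through the return value, not an IOException.
          System.err.println("Could not fully delete " + dir);
        }
      }
    }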
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
index 3753b2b9a3..71c8235757 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
@@ -128,8 +128,26 @@ public Path getLocalPathForWrite(String pathStr,
*/
public Path getLocalPathForWrite(String pathStr, long size,
Configuration conf) throws IOException {
+ return getLocalPathForWrite(pathStr, size, conf, true);
+ }
+
+ /** Get a path from the local FS. Pass size as
+ * SIZE_UNKNOWN if not known a priori. We
+ * round-robin over the set of disks (via the configured dirs) and return
+ * the first complete path which has enough space
+ * @param pathStr the requested path (this will be created on the first
+ * available disk)
+ * @param size the size of the file that is going to be written
+ * @param conf the Configuration object
+ * @param checkWrite ensure that the path is writable
+ * @return the complete path to the file on a local disk
+ * @throws IOException
+ */
+ public Path getLocalPathForWrite(String pathStr, long size,
+ Configuration conf,
+ boolean checkWrite) throws IOException {
AllocatorPerContext context = obtainContext(contextCfgItemName);
- return context.getLocalPathForWrite(pathStr, size, conf);
+ return context.getLocalPathForWrite(pathStr, size, conf, checkWrite);
}
/** Get a path from the local FS for reading. We search through all the
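
A hedged usage sketch of the new overload; the allocator context key, path and size are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.LocalDirAllocator;
    import org.apache.hadoop.fs.Path;

    public class AllocatorSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        LocalDirAllocator alloc = new LocalDirAllocator("mapred.local.dir");
        // checkWrite=false picks a directory round-robin without creating the
        // parent directory or probing that the disk is writable.
        Path out = alloc.getLocalPathForWrite("scratch/part-0", 1024L, conf, false);
        System.out.println(out);
      }
    }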
@@ -145,6 +163,23 @@ public Path getLocalPathToRead(String pathStr,
AllocatorPerContext context = obtainContext(contextCfgItemName);
return context.getLocalPathToRead(pathStr, conf);
}
+
+ /**
+ * Get all of the paths that currently exist in the working directories.
+ * @param pathStr the path underneath the roots
+ * @param conf the configuration to look up the roots in
+ * @return all of the paths that exist under any of the roots
+ * @throws IOException
+ */
+ public Iterable<Path> getAllLocalPathsToRead(String pathStr,
+ Configuration conf
+ ) throws IOException {
+ AllocatorPerContext context;
+ synchronized (this) {
+ context = obtainContext(contextCfgItemName);
+ }
+ return context.getAllLocalPathsToRead(pathStr, conf);
+ }
/** Creates a temporary file in the local FS. Pass size as -1 if not known
* apriori. We round-robin over the set of disks (via the configured dirs)
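
Continuing the sketch above, the new enumeration method returns every existing copy of a relative path across the configured root directories:

    for (Path existing : alloc.getAllLocalPathsToRead("scratch/part-0", conf)) {
      // One entry per configured root under which "scratch/part-0" currently exists.
      System.out.println(existing);
    }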
@@ -214,7 +249,8 @@ public AllocatorPerContext(String contextCfgItemName) {
/** This method gets called every time before any read/write to make sure
* that any change to localDirs is reflected immediately.
*/
- private void confChanged(Configuration conf) throws IOException {
+ private synchronized void confChanged(Configuration conf)
+ throws IOException {
String newLocalDirs = conf.get(contextCfgItemName);
if (!newLocalDirs.equals(savedLocalDirs)) {
localDirs = conf.getTrimmedStrings(contextCfgItemName);
@@ -251,18 +287,22 @@ private void confChanged(Configuration conf) throws IOException {
}
}
- private Path createPath(String path) throws IOException {
+ private Path createPath(String path,
+ boolean checkWrite) throws IOException {
Path file = new Path(new Path(localDirs[dirNumLastAccessed]),
path);
- //check whether we are able to create a directory here. If the disk
- //happens to be RDONLY we will fail
- try {
- DiskChecker.checkDir(new File(file.getParent().toUri().getPath()));
- return file;
- } catch (DiskErrorException d) {
- LOG.warn("Disk Error Exception: ", d);
- return null;
+ if (checkWrite) {
+ //check whether we are able to create a directory here. If the disk
+ //happens to be RDONLY we will fail
+ try {
+ DiskChecker.checkDir(new File(file.getParent().toUri().getPath()));
+ return file;
+ } catch (DiskErrorException d) {
+ LOG.warn("Disk Error Exception: ", d);
+ return null;
+ }
}
+ return file;
}
/**
@@ -272,17 +312,6 @@ private Path createPath(String path) throws IOException {
int getCurrentDirectoryIndex() {
return dirNumLastAccessed;
}
-
- /** Get a path from the local FS. This method should be used if the size of
- * the file is not known a priori.
- *
- * It will use roulette selection, picking directories
- * with probability proportional to their available space.
- */
- public synchronized Path getLocalPathForWrite(String path,
- Configuration conf) throws IOException {
- return getLocalPathForWrite(path, SIZE_UNKNOWN, conf);
- }
/** Get a path from the local FS. If size is known, we go
* round-robin over the set of disks (via the configured dirs) and return
@@ -292,7 +321,7 @@ public synchronized Path getLocalPathForWrite(String path,
* with probability proportional to their available space.
*/
public synchronized Path getLocalPathForWrite(String pathStr, long size,
- Configuration conf) throws IOException {
+ Configuration conf, boolean checkWrite) throws IOException {
confChanged(conf);
int numDirs = localDirs.length;
int numDirsSearched = 0;
@@ -324,7 +353,7 @@ public synchronized Path getLocalPathForWrite(String pathStr, long size,
dir++;
}
dirNumLastAccessed = dir;
- returnPath = createPath(pathStr);
+ returnPath = createPath(pathStr, checkWrite);
if (returnPath == null) {
totalAvailable -= availableOnDisk[dir];
availableOnDisk[dir] = 0; // skip this disk
@@ -335,7 +364,7 @@ public synchronized Path getLocalPathForWrite(String pathStr, long size,
while (numDirsSearched < numDirs && returnPath == null) {
long capacity = dirDF[dirNumLastAccessed].getAvailable();
if (capacity > size) {
- returnPath = createPath(pathStr);
+ returnPath = createPath(pathStr, checkWrite);
}
dirNumLastAccessed++;
dirNumLastAccessed = dirNumLastAccessed % numDirs;
@@ -361,7 +390,7 @@ public File createTmpFileForWrite(String pathStr, long size,
Configuration conf) throws IOException {
// find an appropriate directory
- Path path = getLocalPathForWrite(pathStr, size, conf);
+ Path path = getLocalPathForWrite(pathStr, size, conf, true);
File dir = new File(path.getParent().toUri().getPath());
String prefix = path.getName();
@@ -398,6 +427,74 @@ public synchronized Path getLocalPathToRead(String pathStr,
" the configured local directories");
}
+ private static class PathIterator implements Iterator<Path>, Iterable<Path> {
+ private final FileSystem fs;
+ private final String pathStr;
+ private int i = 0;
+ private final String[] rootDirs;
+ private Path next = null;
+
+ private PathIterator(FileSystem fs, String pathStr, String[] rootDirs)
+ throws IOException {
+ this.fs = fs;
+ this.pathStr = pathStr;
+ this.rootDirs = rootDirs;
+ advance();
+ }
+
+ @Override
+ public boolean hasNext() {
+ return next != null;
+ }
+
+ private void advance() throws IOException {
+ while (i < rootDirs.length) {
+ next = new Path(rootDirs[i++], pathStr);
+ if (fs.exists(next)) {
+ return;
+ }
+ }
+ next = null;
+ }
+
+ @Override
+ public Path next() {
+ Path result = next;
+ try {
+ advance();
+ } catch (IOException ie) {
+ throw new RuntimeException("Can't check existence of " + next, ie);
+ }
+ return result;
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException("read only iterator");
+ }
+
+ @Override
+ public Iterator<Path> iterator() {
+ return this;
+ }
+ }
+
+ /**
+ * Get all of the paths that currently exist in the working directories.
+ * @param pathStr the path underneath the roots
+ * @param conf the configuration to look up the roots in
+ * @return all of the paths that exist under any of the roots
+ * @throws IOException
+ */
+ synchronized Iterable<Path> getAllLocalPathsToRead(String pathStr,
+ Configuration conf) throws IOException {
+ confChanged(conf);
+ if (pathStr.startsWith("/")) {
+ pathStr = pathStr.substring(1);
+ }
+ return new PathIterator(localFS, pathStr, localDirs);
+ }
+
/** We search through all the configured dirs for the file's existence
* and return true when we find one
*/
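A minimal usage sketch, not part of the patch, of the two additions above, assuming a made-up config key "local.dirs.demo" and made-up sub-paths; it shows the checkWrite flag deciding whether the chosen parent directory is created and checked, and getAllLocalPathsToRead enumerating existing copies across all configured roots:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.LocalDirAllocator;
    import org.apache.hadoop.fs.Path;

    public class LocalDirAllocatorSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("local.dirs.demo", "/tmp/demo0,/tmp/demo1");   // hypothetical key
        LocalDirAllocator alloc = new LocalDirAllocator("local.dirs.demo");

        // checkWrite=true (the existing behaviour): the chosen parent directory
        // is created and verified writable before the path is returned.
        Path p1 = alloc.getLocalPathForWrite("work/part-0", 1024, conf, true);

        // checkWrite=false: only the round-robin path is computed; nothing is
        // created and no disk check runs.
        Path p2 = alloc.getLocalPathForWrite("work/part-1", 1024, conf, false);

        // Enumerate every existing copy of a relative path under all roots.
        for (Path p : alloc.getAllLocalPathsToRead("work", conf)) {
          System.out.println("exists: " + p);
        }
        System.out.println(p1 + " " + p2);
      }
    }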
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 63579980cd..fe40d8bc4b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -228,10 +228,10 @@ public void write(int b) throws IOException {
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
if (!exists(f)) {
- throw new FileNotFoundException("File " + f + " not found.");
+ throw new FileNotFoundException("File " + f + " not found");
}
if (getFileStatus(f).isDirectory()) {
- throw new IOException("Cannot append to a diretory (=" + f + " ).");
+ throw new IOException("Cannot append to a directory (=" + f + " )");
}
return new FSDataOutputStream(new BufferedOutputStream(
new LocalFSFileOutputStream(f, true), bufferSize), statistics);
@@ -242,7 +242,7 @@ public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
short replication, long blockSize, Progressable progress)
throws IOException {
if (exists(f) && !overwrite) {
- throw new IOException("File already exists:"+f);
+ throw new IOException("File already exists: "+f);
}
Path parent = f.getParent();
if (parent != null && !mkdirs(parent)) {
@@ -271,11 +271,18 @@ public boolean rename(Path src, Path dst) throws IOException {
return FileUtil.copy(this, src, this, dst, true, getConf());
}
+ /**
+ * Delete the given path to a file or directory.
+ * @param p the path to delete
+ * @param recursive to delete sub-directories
+ * @return true if the file or directory and all its contents were deleted
+ * @throws IOException if p is non-empty and recursive is false
+ */
public boolean delete(Path p, boolean recursive) throws IOException {
File f = pathToFile(p);
if (f.isFile()) {
return f.delete();
- } else if ((!recursive) && f.isDirectory() &&
+ } else if (!recursive && f.isDirectory() &&
(FileUtil.listFiles(f).length != 0)) {
throw new IOException("Directory " + f.toString() + " is not empty");
}
@@ -287,7 +294,7 @@ public FileStatus[] listStatus(Path f) throws IOException {
FileStatus[] results;
if (!localf.exists()) {
- throw new FileNotFoundException("File " + f + " does not exist.");
+ throw new FileNotFoundException("File " + f + " does not exist");
}
if (localf.isFile()) {
return new FileStatus[] {
@@ -421,7 +428,7 @@ public FileStatus getFileStatus(Path f) throws IOException {
if (path.exists()) {
return new RawLocalFileStatus(pathToFile(f), getDefaultBlockSize(), this);
} else {
- throw new FileNotFoundException("File " + f + " does not exist.");
+ throw new FileNotFoundException("File " + f + " does not exist");
}
}
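A hedged sketch of the delete contract documented above, run against the raw local file system; the path under /tmp is illustrative only:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class DeleteContractSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration()).getRawFileSystem();
        Path dir = new Path("/tmp/delete-contract-demo");
        fs.mkdirs(dir);
        fs.create(new Path(dir, "child")).close();
        try {
          fs.delete(dir, false);   // non-empty + non-recursive => IOException
        } catch (IOException expected) {
          System.out.println("as documented: " + expected.getMessage());
        }
        fs.delete(dir, true);      // recursive delete removes dir and its contents
      }
    }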
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
index d5deb7df92..00cdf32746 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
@@ -48,16 +48,12 @@
import org.apache.hadoop.conf.ConfServlet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.http.AdminAuthorizedServlet;
-import org.apache.hadoop.http.FilterContainer;
-import org.apache.hadoop.http.FilterInitializer;
-import org.apache.hadoop.http.HtmlQuoting;
import org.apache.hadoop.jmx.JMXJsonServlet;
import org.apache.hadoop.log.LogLevel;
import org.apache.hadoop.metrics.MetricsServlet;
import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector;
-import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector.MODE;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.util.ReflectionUtils;
import org.mortbay.io.Buffer;
@@ -79,6 +75,8 @@
import org.mortbay.thread.QueuedThreadPool;
import org.mortbay.util.MultiException;
+import com.sun.jersey.spi.container.servlet.ServletContainer;
+
/**
* Create a Jetty embedded server to answer http requests. The primary goal
* is to serve up status information for the server.
@@ -178,7 +176,7 @@ public HttpServer(String name, String bindAddress, int port,
int maxThreads = conf.getInt(HTTP_MAX_THREADS, -1);
// If HTTP_MAX_THREADS is not configured, QueueThreadPool() will use the
- // default value (currently 254).
+ // default value (currently 250).
QueuedThreadPool threadPool = maxThreads == -1 ?
new QueuedThreadPool() : new QueuedThreadPool(maxThreads);
webServer.setThreadPool(threadPool);
@@ -325,6 +323,22 @@ public void setAttribute(String name, Object value) {
webAppContext.setAttribute(name, value);
}
+ /**
+ * Add a Jersey resource package.
+ * @param packageName The Java package name containing the Jersey resource.
+ * @param pathSpec The path spec for the servlet
+ */
+ public void addJerseyResourcePackage(final String packageName,
+ final String pathSpec) {
+ LOG.info("addJerseyResourcePackage: packageName=" + packageName
+ + ", pathSpec=" + pathSpec);
+ final ServletHolder sh = new ServletHolder(ServletContainer.class);
+ sh.setInitParameter("com.sun.jersey.config.property.resourceConfigClass",
+ "com.sun.jersey.api.core.PackagesResourceConfig");
+ sh.setInitParameter("com.sun.jersey.config.property.packages", packageName);
+ webAppContext.addServlet(sh, pathSpec);
+ }
+
/**
* Add a servlet in the server.
* @param name The name of the servlet (can be passed as null)
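A hedged usage sketch for addJerseyResourcePackage, not part of the patch; the resource package and path spec are invented, and the server name "example" assumes a matching webapps/ resource is available to HttpServer:

    import org.apache.hadoop.http.HttpServer;

    public class JerseySketch {
      public static void main(String[] args) throws Exception {
        // findPort=true lets the server walk forward from the given port.
        HttpServer server = new HttpServer("example", "0.0.0.0", 8080, true);
        // Jersey scans the (hypothetical) package for JAX-RS annotated resources
        // and serves them under /api/*.
        server.addJerseyResourcePackage("org.example.web.resources", "/api/*");
        server.start();
      }
    }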
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 9c35e8e9c4..c339ce7eaa 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -285,8 +285,8 @@ public Connection(ConnectionId remoteId) throws IOException {
authMethod = AuthMethod.KERBEROS;
}
- header = new ConnectionHeader(protocol == null ? null : protocol
- .getName(), ticket, authMethod);
+ header =
+ new ConnectionHeader(RPC.getProtocolName(protocol), ticket, authMethod);
if (LOG.isDebugEnabled())
LOG.debug("Use " + authMethod + " authentication for protocol "
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolInfo.java
new file mode 100644
index 0000000000..924fa8b150
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolInfo.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+
+
+/**
+ * The protocol name that is used when a client and server connect.
+ * By default the class name of the protocol interface is the protocol name.
+ *
+ * Why override the default name (i.e. the class name)?
+ * One use case for overriding the default name is when
+ * there are multiple implementations of the same protocol, each with, say, a
+ * different version/serialization.
+ * In Hadoop this is used to allow multiple server and client adapters
+ * for different versions of the same protocol service.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+public @interface ProtocolInfo {
+ String protocolName(); // the name of the protocol (i.e. rpc service)
+}
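A brief sketch of how a protocol interface would pick a wire name with this annotation; the interface and name below are hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.ipc.ProtocolInfo;
    import org.apache.hadoop.ipc.VersionedProtocol;

    // Without the annotation, the protocol name would default to the
    // fully qualified class name of ExampleClientProtocol.
    @ProtocolInfo(protocolName = "org.example.ClientProtocol")
    public interface ExampleClientProtocol extends VersionedProtocol {
      long versionID = 1L;
      String echo(String message) throws IOException;
    }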
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index b42b9133f5..453a5dd175 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -62,6 +62,20 @@
*/
public class RPC {
static final Log LOG = LogFactory.getLog(RPC.class);
+
+
+ /**
+ * Get the protocol name.
+ * If the protocol class has a ProtocolInfo annotation, then get the protocol
+ * name from the annotation; otherwise the class name is the protocol name.
+ */
+ static public String getProtocolName(Class<?> protocol) {
+ if (protocol == null) {
+ return null;
+ }
+ ProtocolInfo anno = (ProtocolInfo) protocol.getAnnotation(ProtocolInfo.class);
+ return (anno == null) ? protocol.getName() : anno.protocolName();
+ }
private RPC() {} // no public ctor
@@ -553,8 +567,10 @@ public static Server getServer(Class<?> protocol,
}
/** Construct a server for a protocol implementation instance. */
- public static Server getServer(Class<?> protocol,
- Object instance, String bindAddress, int port,
+
+ public static <PROTO extends VersionedProtocol, IMPL extends PROTO>
+ Server getServer(Class<PROTO> protocol,
+ IMPL instance, String bindAddress, int port,
int numHandlers, int numReaders, int queueSizePerHandler,
boolean verbose, Configuration conf,
SecretManager<? extends TokenIdentifier> secretManager)
@@ -576,6 +592,18 @@ protected Server(String bindAddress, int port,
super(bindAddress, port, paramClass, handlerCount, numReaders, queueSizePerHandler,
conf, serverName, secretManager);
}
+
+ /**
+ * Add a protocol to the existing server.
+ * @param protocolClass - the protocol class
+ * @param protocolImpl - the impl of the protocol that will be called
+ * @return the server (for convenience)
+ */
+ public <PROTO extends VersionedProtocol, IMPL extends PROTO>
+ Server addProtocol(Class<PROTO> protocolClass, IMPL protocolImpl
+ ) throws IOException {
+ throw new IOException("addProtocol Not Implemented");
+ }
}
}
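A small sketch of what getProtocolName resolves to, reusing the hypothetical annotated interface from the previous example:

    import org.apache.hadoop.ipc.RPC;
    import org.apache.hadoop.ipc.VersionedProtocol;

    public class ProtocolNameSketch {
      public static void main(String[] args) {
        // Annotated interface: the annotation's protocolName wins.
        System.out.println(RPC.getProtocolName(ExampleClientProtocol.class));
        // -> org.example.ClientProtocol

        // Un-annotated interface: falls back to the class name.
        System.out.println(RPC.getProtocolName(VersionedProtocol.class));
        // -> org.apache.hadoop.ipc.VersionedProtocol
      }
    }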
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 96ec07929f..0bfc5722f4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -900,7 +900,7 @@ public class Connection {
private InetAddress addr;
ConnectionHeader header = new ConnectionHeader();
- Class<?> protocol;
+ String protocolName;
boolean useSasl;
SaslServer saslServer;
private AuthMethod authMethod;
@@ -1287,15 +1287,8 @@ private void processHeader(byte[] buf) throws IOException {
DataInputStream in =
new DataInputStream(new ByteArrayInputStream(buf));
header.readFields(in);
- try {
- String protocolClassName = header.getProtocol();
- if (protocolClassName != null) {
- protocol = getProtocolClass(header.getProtocol(), conf);
- rpcDetailedMetrics.init(protocol);
- }
- } catch (ClassNotFoundException cnfe) {
- throw new IOException("Unknown protocol: " + header.getProtocol());
- }
+ protocolName = header.getProtocol();
+
UserGroupInformation protocolUser = header.getUgi();
if (!useSasl) {
@@ -1484,7 +1477,7 @@ public void run() {
// Make the call as the user via Subject.doAs, thus associating
// the call with the Subject
if (call.connection.user == null) {
- value = call(call.connection.protocol, call.param,
+ value = call(call.connection.protocolName, call.param,
call.timestamp);
} else {
value =
@@ -1493,7 +1486,7 @@ public void run() {
@Override
public Writable run() throws Exception {
// make the call
- return call(call.connection.protocol,
+ return call(call.connection.protocolName,
call.param, call.timestamp);
}
@@ -1753,7 +1746,7 @@ public synchronized InetSocketAddress getListenerAddress() {
/**
* Called for each call.
- * @deprecated Use {@link #call(Class, Writable, long)} instead
+ * @deprecated Use {@link #call(String, Writable, long)} instead
*/
@Deprecated
public Writable call(Writable param, long receiveTime) throws IOException {
@@ -1761,7 +1754,7 @@ public Writable call(Writable param, long receiveTime) throws IOException {
}
/** Called for each call. */
- public abstract Writable call(Class<?> protocol,
+ public abstract Writable call(String protocol,
Writable param, long receiveTime)
throws IOException;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/VersionedProtocol.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/VersionedProtocol.java
index 4558f2150d..4d02027a0e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/VersionedProtocol.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/VersionedProtocol.java
@@ -34,7 +34,6 @@ public interface VersionedProtocol {
* @return the version that the server will speak
* @throws IOException if any IO error occurs
*/
- @Deprecated
public long getProtocolVersion(String protocol,
long clientVersion) throws IOException;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
index e587913923..b28949d99a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
@@ -27,6 +27,9 @@
import java.net.InetSocketAddress;
import java.io.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
import java.util.Map;
import java.util.HashMap;
@@ -35,6 +38,7 @@
import org.apache.commons.logging.*;
import org.apache.hadoop.io.*;
+import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.TokenIdentifier;
@@ -47,10 +51,46 @@
public class WritableRpcEngine implements RpcEngine {
private static final Log LOG = LogFactory.getLog(RPC.class);
+
+ /**
+ * Get all superInterfaces that extend VersionedProtocol
+ * @param childInterfaces
+ * @return the super interfaces that extend VersionedProtocol
+ */
+ private static Class<?>[] getSuperInterfaces(Class<?>[] childInterfaces) {
+ List<Class<?>> allInterfaces = new ArrayList<Class<?>>();
+
+ for (Class<?> childInterface : childInterfaces) {
+ if (VersionedProtocol.class.isAssignableFrom(childInterface)) {
+ allInterfaces.add(childInterface);
+ allInterfaces.addAll(
+ Arrays.asList(
+ getSuperInterfaces(childInterface.getInterfaces())));
+ } else {
+ LOG.warn("Interface " + childInterface +
+ " ignored because it does not extend VersionedProtocol");
+ }
+ }
+ return (Class<?>[]) allInterfaces.toArray(new Class[allInterfaces.size()]);
+ }
+
+ /**
+ * Get all interfaces that the given protocol implements or extends
+ * which are assignable from VersionedProtocol.
+ */
+ private static Class<?>[] getProtocolInterfaces(Class<?> protocol) {
+ Class<?>[] interfaces = protocol.getInterfaces();
+ return getSuperInterfaces(interfaces);
+ }
+
+
//writableRpcVersion should be updated if there is a change
//in format of the rpc messages.
- public static long writableRpcVersion = 1L;
+
+ // 2L - added declared class to Invocation
+ public static final long writableRpcVersion = 2L;
+
/** A method invocation, including the method name and its parameters.*/
private static class Invocation implements Writable, Configurable {
private String methodName;
@@ -59,11 +99,13 @@ private static class Invocation implements Writable, Configurable {
private Configuration conf;
private long clientVersion;
private int clientMethodsHash;
+ private String declaringClassProtocolName;
//This could be different from static writableRpcVersion when received
//at server, if client is using a different version.
private long rpcVersion;
+ @SuppressWarnings("unused") // called when deserializing an invocation
public Invocation() {}
public Invocation(Method method, Object[] parameters) {
@@ -88,6 +130,8 @@ public Invocation(Method method, Object[] parameters) {
this.clientMethodsHash = ProtocolSignature.getFingerprint(method
.getDeclaringClass().getMethods());
}
+ this.declaringClassProtocolName =
+ RPC.getProtocolName(method.getDeclaringClass());
}
/** The name of the method invoked. */
@@ -103,6 +147,7 @@ private long getProtocolVersion() {
return clientVersion;
}
+ @SuppressWarnings("unused")
private int getClientMethodsHash() {
return clientMethodsHash;
}
@@ -115,8 +160,10 @@ public long getRpcVersion() {
return rpcVersion;
}
+ @SuppressWarnings("deprecation")
public void readFields(DataInput in) throws IOException {
rpcVersion = in.readLong();
+ declaringClassProtocolName = UTF8.readString(in);
methodName = UTF8.readString(in);
clientVersion = in.readLong();
clientMethodsHash = in.readInt();
@@ -124,13 +171,16 @@ public void readFields(DataInput in) throws IOException {
parameterClasses = new Class[parameters.length];
ObjectWritable objectWritable = new ObjectWritable();
for (int i = 0; i < parameters.length; i++) {
- parameters[i] = ObjectWritable.readObject(in, objectWritable, this.conf);
+ parameters[i] =
+ ObjectWritable.readObject(in, objectWritable, this.conf);
parameterClasses[i] = objectWritable.getDeclaredClass();
}
}
+ @SuppressWarnings("deprecation")
public void write(DataOutput out) throws IOException {
out.writeLong(rpcVersion);
+ UTF8.writeString(out, declaringClassProtocolName);
UTF8.writeString(out, methodName);
out.writeLong(clientVersion);
out.writeInt(clientMethodsHash);
@@ -273,30 +323,161 @@ public Object[] call(Method method, Object[][] params,
/** Construct a server for a protocol implementation instance listening on a
* port and address. */
- public Server getServer(Class<?> protocol,
- Object instance, String bindAddress, int port,
- int numHandlers, int numReaders, int queueSizePerHandler,
- boolean verbose, Configuration conf,
+ public RPC.Server getServer(Class<?> protocolClass,
+ Object protocolImpl, String bindAddress, int port,
+ int numHandlers, int numReaders, int queueSizePerHandler,
+ boolean verbose, Configuration conf,
SecretManager<? extends TokenIdentifier> secretManager)
throws IOException {
- return new Server(instance, conf, bindAddress, port, numHandlers,
- numReaders, queueSizePerHandler, verbose, secretManager);
+ return new Server(protocolClass, protocolImpl, conf, bindAddress, port,
+ numHandlers, numReaders, queueSizePerHandler, verbose, secretManager);
}
+
/** An RPC Server. */
public static class Server extends RPC.Server {
- private Object instance;
private boolean verbose;
+
+ /**
+ * The key in the map of registered protocol implementations: protocol name + version.
+ */
+ static class ProtoNameVer {
+ final String protocol;
+ final long version;
+ ProtoNameVer(String protocol, long ver) {
+ this.protocol = protocol;
+ this.version = ver;
+ }
+ @Override
+ public boolean equals(Object o) {
+ if (o == null)
+ return false;
+ if (this == o)
+ return true;
+ if (! (o instanceof ProtoNameVer))
+ return false;
+ ProtoNameVer pv = (ProtoNameVer) o;
+ return ((pv.protocol.equals(this.protocol)) &&
+ (pv.version == this.version));
+ }
+ @Override
+ public int hashCode() {
+ return protocol.hashCode() * 37 + (int) version;
+ }
+ }
+
+ /**
+ * The value in the map of registered protocol implementations: protocol class + impl object.
+ */
+ static class ProtoClassProtoImpl {
+ final Class<?> protocolClass;
+ final Object protocolImpl;
+ ProtoClassProtoImpl(Class<?> protocolClass, Object protocolImpl) {
+ this.protocolClass = protocolClass;
+ this.protocolImpl = protocolImpl;
+ }
+ }
+
+ private Map<ProtoNameVer, ProtoClassProtoImpl> protocolImplMap =
+ new HashMap<ProtoNameVer, ProtoClassProtoImpl>(10);
+
+ // Register protocol and its impl for rpc calls
+ private void registerProtocolAndImpl(Class<?> protocolClass,
+ Object protocolImpl) throws IOException {
+ String protocolName = RPC.getProtocolName(protocolClass);
+ VersionedProtocol vp = (VersionedProtocol) protocolImpl;
+ long version;
+ try {
+ version = vp.getProtocolVersion(protocolName, 0);
+ } catch (Exception ex) {
+ LOG.warn("Protocol " + protocolClass +
+ " NOT registered because getProtocolVersion threw an exception");
+ return;
+ }
+ protocolImplMap.put(new ProtoNameVer(protocolName, version),
+ new ProtoClassProtoImpl(protocolClass, protocolImpl));
+ LOG.info("ProtocolImpl=" + protocolImpl.getClass().getName() +
+ " protocolClass=" + protocolClass.getName() + " version=" + version);
+ }
+
+ private static class VerProtocolImpl {
+ final long version;
+ final ProtoClassProtoImpl protocolTarget;
+ VerProtocolImpl(long ver, ProtoClassProtoImpl protocolTarget) {
+ this.version = ver;
+ this.protocolTarget = protocolTarget;
+ }
+ }
+
+
+ @SuppressWarnings("unused") // will be useful later.
+ private VerProtocolImpl[] getSupportedProtocolVersions(
+ String protocolName) {
+ VerProtocolImpl[] resultk = new VerProtocolImpl[protocolImplMap.size()];
+ int i = 0;
+ for (Map.Entry<ProtoNameVer, ProtoClassProtoImpl> pv :
+ protocolImplMap.entrySet()) {
+ if (pv.getKey().protocol.equals(protocolName)) {
+ resultk[i++] =
+ new VerProtocolImpl(pv.getKey().version, pv.getValue());
+ }
+ }
+ if (i == 0) {
+ return null;
+ }
+ VerProtocolImpl[] result = new VerProtocolImpl[i];
+ System.arraycopy(resultk, 0, result, 0, i);
+ return result;
+ }
+
+ private VerProtocolImpl getHighestSupportedProtocol(String protocolName) {
+ Long highestVersion = 0L;
+ ProtoClassProtoImpl highest = null;
+ for (Map.Entry<ProtoNameVer, ProtoClassProtoImpl> pv : protocolImplMap
+ .entrySet()) {
+ if (pv.getKey().protocol.equals(protocolName)) {
+ if ((highest == null) || (pv.getKey().version > highestVersion)) {
+ highest = pv.getValue();
+ highestVersion = pv.getKey().version;
+ }
+ }
+ }
+ if (highest == null) {
+ return null;
+ }
+ return new VerProtocolImpl(highestVersion, highest);
+ }
+
/** Construct an RPC server.
* @param instance the instance whose methods will be called
* @param conf the configuration to use
* @param bindAddress the address to bind on to listen for connection
* @param port the port to listen for connections on
+ *
+ * @deprecated Use #Server(Class, Object, Configuration, String, int)
+ *
*/
- public Server(Object instance, Configuration conf, String bindAddress, int port)
+ @Deprecated
+ public Server(Object instance, Configuration conf, String bindAddress,
+ int port)
throws IOException {
- this(instance, conf, bindAddress, port, 1, -1, -1, false, null);
+ this(null, instance, conf, bindAddress, port);
+ }
+
+
+ /** Construct an RPC server.
+ * @param protocolClass the class of the protocol
+ * @param protocolImpl the instance whose methods will be called
+ * @param conf the configuration to use
+ * @param bindAddress the address to bind on to listen for connection
+ * @param port the port to listen for connections on
+ */
+ public Server(Class<?> protocolClass, Object protocolImpl,
+ Configuration conf, String bindAddress, int port)
+ throws IOException {
+ this(protocolClass, protocolImpl, conf, bindAddress, port, 1, -1, -1,
+ false, null);
}
private static String classNameBase(String className) {
@@ -307,35 +488,103 @@ private static String classNameBase(String className) {
return names[names.length-1];
}
+
/** Construct an RPC server.
- * @param instance the instance whose methods will be called
+ * @param protocolImpl the instance whose methods will be called
+ * @param conf the configuration to use
+ * @param bindAddress the address to bind on to listen for connection
+ * @param port the port to listen for connections on
+ * @param numHandlers the number of method handler threads to run
+ * @param verbose whether each call should be logged
+ *
+ * @deprecated use Server#Server(Class, Object,
+ * Configuration, String, int, int, int, int, boolean, SecretManager)
+ */
+ @Deprecated
+ public Server(Object protocolImpl, Configuration conf, String bindAddress,
+ int port, int numHandlers, int numReaders, int queueSizePerHandler,
+ boolean verbose, SecretManager<? extends TokenIdentifier> secretManager)
+ throws IOException {
+ this(null, protocolImpl, conf, bindAddress, port,
+ numHandlers, numReaders, queueSizePerHandler, verbose,
+ secretManager);
+
+ }
+
+ /** Construct an RPC server.
+ * @param protocolClass - the protocol being registered
+ * can be null for compatibility with old usage (see below for details)
+ * @param protocolImpl the protocol impl that will be called
* @param conf the configuration to use
* @param bindAddress the address to bind on to listen for connection
* @param port the port to listen for connections on
* @param numHandlers the number of method handler threads to run
* @param verbose whether each call should be logged
*/
- public Server(Object instance, Configuration conf, String bindAddress, int port,
- int numHandlers, int numReaders, int queueSizePerHandler, boolean verbose,
- SecretManager<? extends TokenIdentifier> secretManager)
+ public Server(Class<?> protocolClass, Object protocolImpl,
+ Configuration conf, String bindAddress, int port,
+ int numHandlers, int numReaders, int queueSizePerHandler,
+ boolean verbose, SecretManager<? extends TokenIdentifier> secretManager)
throws IOException {
super(bindAddress, port, Invocation.class, numHandlers, numReaders,
queueSizePerHandler, conf,
- classNameBase(instance.getClass().getName()), secretManager);
- this.instance = instance;
+ classNameBase(protocolImpl.getClass().getName()), secretManager);
+
this.verbose = verbose;
+
+
+ Class<?>[] protocols;
+ if (protocolClass == null) { // derive protocol from impl
+ /*
+ * In order to remain compatible with the old usage where a single
+ * target protocolImpl is supplied for all protocol interfaces, and
+ * the protocol interfaces are derived from the protocolImpl,
+ * we register all interfaces extended by the protocolImpl.
+ */
+ protocols = getProtocolInterfaces(protocolImpl.getClass());
+
+ } else {
+ if (!protocolClass.isAssignableFrom(protocolImpl.getClass())) {
+ throw new IOException("protocolClass "+ protocolClass +
+ " is not implemented by protocolImpl which is of class " +
+ protocolImpl.getClass());
+ }
+ // register protocol class and its super interfaces
+ registerProtocolAndImpl(protocolClass, protocolImpl);
+ protocols = getProtocolInterfaces(protocolClass);
+ }
+ for (Class<?> p : protocols) {
+ if (!p.equals(VersionedProtocol.class)) {
+ registerProtocolAndImpl(p, protocolImpl);
+ }
+ }
+
}
- public Writable call(Class<?> protocol, Writable param, long receivedTime)
+
+ @Override
+ public <PROTO extends VersionedProtocol, IMPL extends PROTO> Server
+ addProtocol(
+ Class<PROTO> protocolClass, IMPL protocolImpl) throws IOException {
+ registerProtocolAndImpl(protocolClass, protocolImpl);
+ return this;
+ }
+
+ /**
+ * Process a client call
+ * @param protocolName - the protocol name (the class of the client proxy
+ * used to make calls to the rpc server).
+ * @param param parameters
+ * @param receivedTime time at which the call was received (for metrics)
+ * @return the call's return
+ * @throws IOException
+ */
+ public Writable call(String protocolName, Writable param, long receivedTime)
throws IOException {
try {
Invocation call = (Invocation)param;
if (verbose) log("Call: " + call);
- Method method = protocol.getMethod(call.getMethodName(),
- call.getParameterClasses());
- method.setAccessible(true);
-
// Verify rpc version
if (call.getRpcVersion() != writableRpcVersion) {
// Client is using a different version of WritableRpc
@@ -344,25 +593,51 @@ public Writable call(Class<?> protocol, Writable param, long receivedTime)
+ call.getRpcVersion() + ", server side version="
+ writableRpcVersion);
}
-
- //Verify protocol version.
- //Bypass the version check for VersionedProtocol
- if (!method.getDeclaringClass().equals(VersionedProtocol.class)) {
- long clientVersion = call.getProtocolVersion();
- ProtocolSignature serverInfo = ((VersionedProtocol) instance)
- .getProtocolSignature(protocol.getCanonicalName(), call
- .getProtocolVersion(), call.getClientMethodsHash());
- long serverVersion = serverInfo.getVersion();
- if (serverVersion != clientVersion) {
- LOG.warn("Version mismatch: client version=" + clientVersion
- + ", server version=" + serverVersion);
- throw new RPC.VersionMismatch(protocol.getName(), clientVersion,
- serverVersion);
+
+ long clientVersion = call.getProtocolVersion();
+ final String protoName;
+ ProtoClassProtoImpl protocolImpl;
+ if (call.declaringClassProtocolName.equals(VersionedProtocol.class.getName())) {
+ // VersionedProtocol methods are often used by the client to figure out
+ // which version of the protocol to use.
+ //
+ // Versioned protocol methods should go to the protocolName protocol
+ // rather than the declaring class of the method, since the
+ // declaring class is VersionedProtocol, which is not
+ // registered directly.
+ // Send the call to the highest protocol version
+ protocolImpl =
+ getHighestSupportedProtocol(protocolName).protocolTarget;
+ } else {
+ protoName = call.declaringClassProtocolName;
+
+ // Find the right impl for the protocol based on client version.
+ ProtoNameVer pv =
+ new ProtoNameVer(call.declaringClassProtocolName, clientVersion);
+ protocolImpl = protocolImplMap.get(pv);
+ if (protocolImpl == null) { // no match for Protocol AND Version
+ VerProtocolImpl highest =
+ getHighestSupportedProtocol(protoName);
+ if (highest == null) {
+ throw new IOException("Unknown protocol: " + protoName);
+ } else { // protocol supported but not the version that client wants
+ throw new RPC.VersionMismatch(protoName, clientVersion,
+ highest.version);
+ }
}
}
+
+
+ // Invoke the protocol method
long startTime = System.currentTimeMillis();
- Object value = method.invoke(instance, call.getParameters());
+ Method method =
+ protocolImpl.protocolClass.getMethod(call.getMethodName(),
+ call.getParameterClasses());
+ method.setAccessible(true);
+ rpcDetailedMetrics.init(protocolImpl.protocolClass);
+ Object value =
+ method.invoke(protocolImpl.protocolImpl, call.getParameters());
int processingTime = (int) (System.currentTimeMillis() - startTime);
int qTime = (int) (startTime-receivedTime);
if (LOG.isDebugEnabled()) {
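A hedged end-to-end sketch, not part of the patch, of the multi-protocol registration that the new (protocol name, version) -> implementation map enables; the admin protocol, its impl, and the wire name are invented for illustration:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.ProtocolInfo;
    import org.apache.hadoop.ipc.ProtocolSignature;
    import org.apache.hadoop.ipc.RPC;
    import org.apache.hadoop.ipc.VersionedProtocol;

    public class MultiProtocolServerSketch {

      @ProtocolInfo(protocolName = "org.example.AdminProtocol")
      public interface ExampleAdminProtocol extends VersionedProtocol {
        long versionID = 1L;
        void refresh() throws IOException;
      }

      static class AdminImpl implements ExampleAdminProtocol {
        public void refresh() { }
        public long getProtocolVersion(String protocol, long clientVersion) {
          return versionID;   // used as the version key when registering
        }
        public ProtocolSignature getProtocolSignature(String protocol,
            long clientVersion, int clientMethodsHash) throws IOException {
          return ProtocolSignature.getProtocolSignature(this, protocol,
              clientVersion, clientMethodsHash);
        }
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        ExampleAdminProtocol admin = new AdminImpl();
        // The server is constructed for one protocol; each registered protocol
        // is keyed by (RPC.getProtocolName(class), impl.getProtocolVersion()).
        RPC.Server server = RPC.getServer(ExampleAdminProtocol.class, admin,
            "0.0.0.0", 0, 1, -1, -1, false, conf, null);
        // Further protocols would share the same port via
        // server.addProtocol(OtherProtocol.class, otherImpl); incoming calls
        // are routed by the declaringClassProtocolName carried in each Invocation.
        server.start();
      }
    }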
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java
index 6460120012..18dc7a0da7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java
@@ -132,6 +132,12 @@ public void emitRecord(String contextName, String recordName,
StringBuilder sb = new StringBuilder();
sb.append(contextName);
sb.append('.');
+
+ if (contextName.equals("jvm") && outRec.getTag("processName") != null) {
+ sb.append(outRec.getTag("processName"));
+ sb.append('.');
+ }
+
sb.append(recordName);
sb.append('.');
int sbBaseLen = sb.length();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink30.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink30.java
index 8d90101f2c..37f91c9da9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink30.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink30.java
@@ -20,13 +20,21 @@
import java.io.IOException;
import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
import java.util.Map;
+import java.util.Set;
+import org.apache.commons.configuration.SubsetConfiguration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsTag;
+import org.apache.hadoop.metrics2.impl.MsInfo;
import org.apache.hadoop.metrics2.util.MetricsCache;
import org.apache.hadoop.metrics2.util.MetricsCache.Record;
@@ -38,8 +46,67 @@ public class GangliaSink30 extends AbstractGangliaSink {
public final Log LOG = LogFactory.getLog(this.getClass());
+ private static final String TAGS_FOR_PREFIX_PROPERTY_PREFIX = "tagsForPrefix.";
+
private MetricsCache metricsCache = new MetricsCache();
+ // a key with a NULL value means ALL
+ private Map<String, Set<String>> useTagsMap = new HashMap<String, Set<String>>();
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public void init(SubsetConfiguration conf) {
+ super.init(conf);
+
+ conf.setListDelimiter(',');
+ Iterator<String> it = (Iterator<String>) conf.getKeys();
+ while (it.hasNext()) {
+ String propertyName = it.next();
+ if (propertyName.startsWith(TAGS_FOR_PREFIX_PROPERTY_PREFIX)) {
+ String contextName = propertyName.substring(TAGS_FOR_PREFIX_PROPERTY_PREFIX.length());
+ String[] tags = conf.getStringArray(propertyName);
+ boolean useAllTags = false;
+ Set<String> set = null;
+ if (tags.length > 0) {
+ set = new HashSet<String>();
+ for (String tag : tags) {
+ tag = tag.trim();
+ useAllTags |= tag.equals("*");
+ if (tag.length() > 0) {
+ set.add(tag);
+ }
+ }
+ if (useAllTags) {
+ set = null;
+ }
+ }
+ useTagsMap.put(contextName, set);
+ }
+ }
+ }
+
+ @InterfaceAudience.Private
+ public void appendPrefix(MetricsRecord record, StringBuilder sb) {
+ String contextName = record.context();
+ Collection<MetricsTag> tags = record.tags();
+ if (useTagsMap.containsKey(contextName)) {
+ Set<String> useTags = useTagsMap.get(contextName);
+ for (MetricsTag t : tags) {
+ if (useTags == null || useTags.contains(t.name())) {
+
+ // the context is always skipped here because it is always added
+
+ // the hostname is always skipped to avoid case-mismatches
+ // from different DNSes.
+
+ if (t.info() != MsInfo.Context && t.info() != MsInfo.Hostname && t.value() != null) {
+ sb.append('.').append(t.name()).append('=').append(t.value());
+ }
+ }
+ }
+ }
+ }
+
@Override
public void putMetrics(MetricsRecord record) {
// The method handles both cases whether Ganglia support dense publish
@@ -53,6 +120,8 @@ public void putMetrics(MetricsRecord record) {
sb.append('.');
sb.append(recordName);
+ appendPrefix(record, sb);
+
String groupName = sb.toString();
sb.append('.');
int sbBaseLen = sb.length();
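A hedged configuration sketch of the new tagsForPrefix support: in a hadoop-metrics2.properties file that routes metrics to this sink, per-context keys under tagsForPrefix. list the tags to append to the metric group name, with '*' keeping every tag. The sink name, host, and contexts below are illustrative only.

    *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30
    # append only the ProcessName tag for jvm metrics
    *.sink.ganglia.tagsForPrefix.jvm=ProcessName
    # append every tag for dfs metrics
    *.sink.ganglia.tagsForPrefix.dfs=*
    namenode.sink.ganglia.servers=gmond.example.com:8649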
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
index 7f983f3e3d..cd6ab7b326 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.security;
-import org.apache.hadoop.alfredo.server.AuthenticationFilter;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.http.FilterInitializer;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/HadoopKerberosName.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/HadoopKerberosName.java
index 35e8d39d6d..36f1943f50 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/HadoopKerberosName.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/HadoopKerberosName.java
@@ -23,7 +23,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.alfredo.util.KerberosName;
+import org.apache.hadoop.security.authentication.util.KerberosName;
import sun.security.krb5.Config;
import sun.security.krb5.KrbException;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
index cbba1c62e3..46993e16aa 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
@@ -158,10 +158,7 @@ public static void main(String[] args) throws Throwable {
Runtime.getRuntime().addShutdownHook(new Thread() {
public void run() {
- try {
- FileUtil.fullyDelete(workDir);
- } catch (IOException e) {
- }
+ FileUtil.fullyDelete(workDir);
}
});
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 8fc6017f6d..d4b4030559 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -317,6 +317,11 @@
<value>org.apache.hadoop.hdfs.HsftpFileSystem</value>
</property>

+<property>
+  <name>fs.webhdfs.impl</name>
+  <value>org.apache.hadoop.hdfs.web.WebHdfsFileSystem</value>
+</property>
+
<property>
<name>fs.ftp.impl</name>
<value>org.apache.hadoop.fs.ftp.FTPFileSystem</value>
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java
index 9205f640ec..6b3963b41a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java
@@ -32,6 +32,7 @@
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
+import org.mortbay.log.Log;
import static org.apache.hadoop.fs.FileSystemTestHelper.*;
@@ -62,8 +63,6 @@ public abstract class FSMainOperationsBaseTest {
private static String TEST_DIR_AXX = "test/hadoop/axx";
private static int numBlocks = 2;
- static final String LOCAL_FS_ROOT_URI = "file:///tmp/test";
-
protected static FileSystem fSys;
@@ -83,7 +82,7 @@ public boolean accept(Path file) {
}
};
- private static byte[] data = getFileData(numBlocks,
+ protected static final byte[] data = getFileData(numBlocks,
getDefaultBlockSize());
@Before
@@ -183,7 +182,7 @@ public void testWorkingDirectory() throws Exception {
@Test
public void testWDAbsolute() throws IOException {
- Path absoluteDir = new Path(LOCAL_FS_ROOT_URI + "/existingDir");
+ Path absoluteDir = new Path(fSys.getUri() + "/test/existingDir");
fSys.mkdirs(absoluteDir);
fSys.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
@@ -646,7 +645,7 @@ public void testWriteReadAndDeleteTwoBlocks() throws Exception {
writeReadAndDelete(getDefaultBlockSize() * 2);
}
- private void writeReadAndDelete(int len) throws IOException {
+ protected void writeReadAndDelete(int len) throws IOException {
Path path = getTestRootPath(fSys, "test/hadoop/file");
fSys.mkdirs(path.getParent());
@@ -768,6 +767,7 @@ public void testRenameNonExistentPath() throws Exception {
rename(src, dst, false, false, false, Rename.NONE);
Assert.fail("Should throw FileNotFoundException");
} catch (IOException e) {
+ Log.info("XXX", e);
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
index 8b37f2aa6a..94c4b0c31f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -45,7 +45,7 @@
public abstract class FileSystemContractBaseTest extends TestCase {
protected FileSystem fs;
- private byte[] data = new byte[getBlockSize() * 2]; // two blocks of data
+ protected byte[] data = new byte[getBlockSize() * 2]; // two blocks of data
{
for (int i = 0; i < data.length; i++) {
data[i] = (byte) (i % 10);
@@ -215,7 +215,7 @@ public void testWriteReadAndDeleteTwoBlocks() throws Exception {
writeReadAndDelete(getBlockSize() * 2);
}
- private void writeReadAndDelete(int len) throws IOException {
+ protected void writeReadAndDelete(int len) throws IOException {
Path path = path("/test/hadoop/file");
fs.mkdirs(path.getParent());
@@ -256,7 +256,7 @@ public void testOverwrite() throws IOException {
assertEquals("Length", data.length, fs.getFileStatus(path).getLen());
try {
- fs.create(path, false);
+ fs.create(path, false).close();
fail("Should throw IOException.");
} catch (IOException e) {
// Expected
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
index b827ca4378..b2a9e16038 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
@@ -17,16 +17,15 @@
*/
package org.apache.hadoop.fs;
-import java.io.DataInputStream;
import java.io.IOException;
import java.io.FileNotFoundException;
import java.net.URI;
+import java.util.Random;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.IOUtils;
import org.junit.Assert;
-
+import static org.junit.Assert.*;
/**
* Helper class for unit tests.
@@ -143,23 +142,33 @@ public static boolean isDir(FileSystem fSys, Path p) throws IOException {
}
}
-
- public static void writeFile(FileSystem fSys, Path path,byte b[])
- throws Exception {
- FSDataOutputStream out =
- fSys.create(path);
- out.write(b);
- out.close();
+ static String writeFile(FileSystem fileSys, Path name, int fileSize)
+ throws IOException {
+ final long seed = 0xDEADBEEFL;
+ // Create and write a file of the given size, filled with random data
+ FSDataOutputStream stm = fileSys.create(name);
+ byte[] buffer = new byte[fileSize];
+ Random rand = new Random(seed);
+ rand.nextBytes(buffer);
+ stm.write(buffer);
+ stm.close();
+ return new String(buffer);
}
- public static byte[] readFile(FileSystem fSys, Path path, int len )
- throws Exception {
- DataInputStream dis = fSys.open(path);
- byte[] buffer = new byte[len];
- IOUtils.readFully(dis, buffer, 0, len);
- dis.close();
- return buffer;
+ static String readFile(FileSystem fs, Path name, int buflen)
+ throws IOException {
+ byte[] b = new byte[buflen];
+ int offset = 0;
+ FSDataInputStream in = fs.open(name);
+ for (int remaining, n;
+ (remaining = b.length - offset) > 0 && (n = in.read(b, offset, remaining)) != -1;
+ offset += n);
+ assertEquals(offset, Math.min(b.length, in.getPos()));
+ in.close();
+ String s = new String(b, 0, offset);
+ return s;
}
+
public static FileStatus containsPath(FileSystem fSys, Path path,
FileStatus[] dirList)
throws IOException {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestChecksumFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestChecksumFileSystem.java
index 8ca095a648..373bdf12d5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestChecksumFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestChecksumFileSystem.java
@@ -18,10 +18,9 @@
package org.apache.hadoop.fs;
-import java.net.URI;
-
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataOutputStream;
+import static org.apache.hadoop.fs.FileSystemTestHelper.*;
import org.apache.hadoop.conf.Configuration;
import junit.framework.TestCase;
@@ -56,13 +55,13 @@ public void testVerifyChecksum() throws Exception {
// Exercise some boundary cases - a divisor of the chunk size
// the chunk size, 2x chunk size, and +/-1 around these.
- TestLocalFileSystem.readFile(localFs, testPath, 128);
- TestLocalFileSystem.readFile(localFs, testPath, 511);
- TestLocalFileSystem.readFile(localFs, testPath, 512);
- TestLocalFileSystem.readFile(localFs, testPath, 513);
- TestLocalFileSystem.readFile(localFs, testPath, 1023);
- TestLocalFileSystem.readFile(localFs, testPath, 1024);
- TestLocalFileSystem.readFile(localFs, testPath, 1025);
+ readFile(localFs, testPath, 128);
+ readFile(localFs, testPath, 511);
+ readFile(localFs, testPath, 512);
+ readFile(localFs, testPath, 513);
+ readFile(localFs, testPath, 1023);
+ readFile(localFs, testPath, 1024);
+ readFile(localFs, testPath, 1025);
localFs.delete(localFs.getChecksumFile(testPath), true);
assertTrue("checksum deleted", !localFs.exists(localFs.getChecksumFile(testPath)));
@@ -74,7 +73,7 @@ public void testVerifyChecksum() throws Exception {
boolean errorRead = false;
try {
- TestLocalFileSystem.readFile(localFs, testPath, 1024);
+ readFile(localFs, testPath, 1024);
}catch(ChecksumException ie) {
errorRead = true;
}
@@ -83,7 +82,7 @@ public void testVerifyChecksum() throws Exception {
//now setting verify false, the read should succeed
try {
localFs.setVerifyChecksum(false);
- String str = TestLocalFileSystem.readFile(localFs, testPath, 1024);
+ String str = readFile(localFs, testPath, 1024).toString();
assertTrue("read", "testing".equals(str));
} finally {
// reset for other tests
@@ -104,13 +103,13 @@ public void testMultiChunkFile() throws Exception {
// Exercise some boundary cases - a divisor of the chunk size
// the chunk size, 2x chunk size, and +/-1 around these.
- TestLocalFileSystem.readFile(localFs, testPath, 128);
- TestLocalFileSystem.readFile(localFs, testPath, 511);
- TestLocalFileSystem.readFile(localFs, testPath, 512);
- TestLocalFileSystem.readFile(localFs, testPath, 513);
- TestLocalFileSystem.readFile(localFs, testPath, 1023);
- TestLocalFileSystem.readFile(localFs, testPath, 1024);
- TestLocalFileSystem.readFile(localFs, testPath, 1025);
+ readFile(localFs, testPath, 128);
+ readFile(localFs, testPath, 511);
+ readFile(localFs, testPath, 512);
+ readFile(localFs, testPath, 513);
+ readFile(localFs, testPath, 1023);
+ readFile(localFs, testPath, 1024);
+ readFile(localFs, testPath, 1025);
}
/**
@@ -140,7 +139,7 @@ public void testTruncatedChecksum() throws Exception {
// Now reading the file should fail with a ChecksumException
try {
- TestLocalFileSystem.readFile(localFs, testPath, 1024);
+ readFile(localFs, testPath, 1024);
fail("Did not throw a ChecksumException when reading truncated " +
"crc file");
} catch(ChecksumException ie) {
@@ -149,7 +148,7 @@ public void testTruncatedChecksum() throws Exception {
// telling it not to verify checksums, should avoid issue.
try {
localFs.setVerifyChecksum(false);
- String str = TestLocalFileSystem.readFile(localFs, testPath, 1024);
+ String str = readFile(localFs, testPath, 1024).toString();
assertTrue("read", "testing truncation".equals(str));
} finally {
// reset for other tests
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java
index f6cfa1c7ca..ffb1dcf1f1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java
@@ -29,7 +29,7 @@ public class TestDU extends TestCase {
final static private File DU_DIR = new File(
System.getProperty("test.build.data","/tmp"), "dutmp");
- public void setUp() throws IOException {
+ public void setUp() {
FileUtil.fullyDelete(DU_DIR);
assertTrue(DU_DIR.mkdirs());
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHardLink.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHardLink.java
index 6c7ac2e82c..ff1d099438 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHardLink.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHardLink.java
@@ -98,7 +98,7 @@ public class TestHardLink {
* @throws IOException
*/
@BeforeClass
- public static void setupClean() throws IOException {
+ public static void setupClean() {
//delete source and target directories if they exist
FileUtil.fullyDelete(src);
FileUtil.fullyDelete(tgt_one);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java
index eef90308aa..1e22a73bba 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.fs;
import java.io.File;
+import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
@@ -208,4 +209,33 @@ public void test4() throws Exception {
}
}
+ /** Two buffer dirs. The first dir does not exist & is on a read-only disk;
+ * the second dir exists & is RW.
+ * getLocalPathForWrite with checkWrite set to true should create the parent
+ * directory; with checkWrite set to false, the directory should not be created.
+ * @throws IOException
+ */
+ public void testLocalPathForWriteDirCreation() throws IOException {
+ try {
+ conf.set(CONTEXT, BUFFER_DIR[0] + "," + BUFFER_DIR[1]);
+ assertTrue(localFs.mkdirs(BUFFER_PATH[1]));
+ BUFFER_ROOT.setReadOnly();
+ Path p1 =
+ dirAllocator.getLocalPathForWrite("p1/x", SMALL_FILE_SIZE, conf);
+ assertTrue(localFs.getFileStatus(p1.getParent()).isDirectory());
+
+ Path p2 =
+ dirAllocator.getLocalPathForWrite("p2/x", SMALL_FILE_SIZE, conf,
+ false);
+ try {
+ localFs.getFileStatus(p2.getParent());
+ } catch (Exception e) {
+ assertEquals(e.getClass(), FileNotFoundException.class);
+ }
+ } finally {
+ Shell.execCommand(new String[] { "chmod", "u+w", BUFFER_DIR_ROOT });
+ rmBufferDirs();
+ }
+ }
+
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
index 45e0f4a338..6ccc201c55 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
@@ -18,37 +18,23 @@
package org.apache.hadoop.fs;
import org.apache.hadoop.conf.Configuration;
+import static org.apache.hadoop.fs.FileSystemTestHelper.*;
+
import java.io.*;
-import junit.framework.*;
+
+import static org.junit.Assert.*;
+import org.junit.Before;
+import org.junit.Test;
/**
* This class tests the local file system via the FileSystem abstraction.
*/
-public class TestLocalFileSystem extends TestCase {
+public class TestLocalFileSystem {
private static String TEST_ROOT_DIR
= System.getProperty("test.build.data","build/test/data/work-dir/localfs");
-
- static void writeFile(FileSystem fs, Path name) throws IOException {
- FSDataOutputStream stm = fs.create(name);
- stm.writeBytes("42\n");
- stm.close();
- }
-
- static String readFile(FileSystem fs, Path name, int buflen) throws IOException {
- byte[] b = new byte[buflen];
- int offset = 0;
- FSDataInputStream in = fs.open(name);
- for(int remaining, n;
- (remaining = b.length - offset) > 0 && (n = in.read(b, offset, remaining)) != -1;
- offset += n);
- assertEquals(offset, Math.min(b.length, in.getPos()));
- in.close();
-
- String s = new String(b, 0, offset);
- System.out.println("s=" + s);
- return s;
- }
+ private Configuration conf;
+ private FileSystem fileSys;
private void cleanupFile(FileSystem fs, Path name) throws IOException {
assertTrue(fs.exists(name));
@@ -56,12 +42,18 @@ private void cleanupFile(FileSystem fs, Path name) throws IOException {
assertTrue(!fs.exists(name));
}
+ @Before
+ public void setup() throws IOException {
+ conf = new Configuration();
+ fileSys = FileSystem.getLocal(conf);
+ fileSys.delete(new Path(TEST_ROOT_DIR), true);
+ }
+
/**
* Test the capability of setting the working directory.
*/
+ @Test
public void testWorkingDirectory() throws IOException {
- Configuration conf = new Configuration();
- FileSystem fileSys = FileSystem.getLocal(conf);
Path origDir = fileSys.getWorkingDirectory();
Path subdir = new Path(TEST_ROOT_DIR, "new");
try {
@@ -85,7 +77,7 @@ public void testWorkingDirectory() throws IOException {
// create files and manipulate them.
Path file1 = new Path("file1");
Path file2 = new Path("sub/file2");
- writeFile(fileSys, file1);
+ String contents = writeFile(fileSys, file1, 1);
fileSys.copyFromLocalFile(file1, file2);
assertTrue(fileSys.exists(file1));
assertTrue(fileSys.isFile(file1));
@@ -103,11 +95,10 @@ public void testWorkingDirectory() throws IOException {
InputStream stm = fileSys.open(file1);
byte[] buffer = new byte[3];
int bytesRead = stm.read(buffer, 0, 3);
- assertEquals("42\n", new String(buffer, 0, bytesRead));
+ assertEquals(contents, new String(buffer, 0, bytesRead));
stm.close();
} finally {
fileSys.setWorkingDirectory(origDir);
- fileSys.delete(subdir, true);
}
}
@@ -115,6 +106,7 @@ public void testWorkingDirectory() throws IOException {
* test Syncable interface on raw local file system
* @throws IOException
*/
+ @Test
public void testSyncable() throws IOException {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf).getRawFileSystem();
@@ -148,12 +140,13 @@ private void verifyFile(FileSystem fs, Path file, int bytesToVerify,
}
}
+ @Test
public void testCopy() throws IOException {
Configuration conf = new Configuration();
LocalFileSystem fs = FileSystem.getLocal(conf);
Path src = new Path(TEST_ROOT_DIR, "dingo");
Path dst = new Path(TEST_ROOT_DIR, "yak");
- writeFile(fs, src);
+ writeFile(fs, src, 1);
assertTrue(FileUtil.copy(fs, src, fs, dst, true, false, conf));
assertTrue(!fs.exists(src) && fs.exists(dst));
assertTrue(FileUtil.copy(fs, dst, fs, src, false, false, conf));
@@ -170,9 +163,12 @@ public void testCopy() throws IOException {
try {
FileUtil.copy(fs, dst, fs, src, true, true, conf);
fail("Failed to detect existing dir");
- } catch (IOException e) { }
+ } catch (IOException e) {
+ // Expected
+ }
}
+ @Test
public void testHomeDirectory() throws IOException {
Configuration conf = new Configuration();
FileSystem fileSys = FileSystem.getLocal(conf);
@@ -182,16 +178,18 @@ public void testHomeDirectory() throws IOException {
assertEquals(home, fsHome);
}
+ @Test
public void testPathEscapes() throws IOException {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
Path path = new Path(TEST_ROOT_DIR, "foo%bar");
- writeFile(fs, path);
+ writeFile(fs, path, 1);
FileStatus status = fs.getFileStatus(path);
assertEquals(path.makeQualified(fs), status.getPath());
cleanupFile(fs, path);
}
+ @Test
public void testMkdirs() throws IOException {
Configuration conf = new Configuration();
LocalFileSystem fs = FileSystem.getLocal(conf);
@@ -199,18 +197,40 @@ public void testMkdirs() throws IOException {
Path test_file = new Path(TEST_ROOT_DIR, "file1");
assertTrue(fs.mkdirs(test_dir));
- writeFile(fs, test_file);
+ writeFile(fs, test_file, 1);
// creating dir over a file
Path bad_dir = new Path(test_file, "another_dir");
try {
fs.mkdirs(bad_dir);
fail("Failed to detect existing file in path");
- } catch (FileAlreadyExistsException e) { }
+ } catch (FileAlreadyExistsException e) {
+ // Expected
+ }
try {
fs.mkdirs(null);
fail("Failed to detect null in mkdir arg");
- } catch (IllegalArgumentException e) { }
+ } catch (IllegalArgumentException e) {
+ // Expected
+ }
+ }
+
+ /** Test deleting a file, directory, and non-existent path */
+ @Test
+ public void testBasicDelete() throws IOException {
+ Configuration conf = new Configuration();
+ LocalFileSystem fs = FileSystem.getLocal(conf);
+ Path dir1 = new Path(TEST_ROOT_DIR, "dir1");
+ Path file1 = new Path(TEST_ROOT_DIR, "file1");
+ Path file2 = new Path(TEST_ROOT_DIR+"/dir1", "file2");
+ Path file3 = new Path(TEST_ROOT_DIR, "does-not-exist");
+ assertTrue(fs.mkdirs(dir1));
+ writeFile(fs, file1, 1);
+ writeFile(fs, file2, 1);
+    assertFalse("Returned true deleting non-existent path",
+ fs.delete(file3));
+ assertTrue("Did not delete file", fs.delete(file1));
+ assertTrue("Did not delete non-empty dir", fs.delete(dir1));
}
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index 4149af3c9b..3d739a07d8 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -19,9 +19,9 @@
import static org.apache.hadoop.fs.CommonConfigurationKeys.*;
+import static org.apache.hadoop.fs.FileSystemTestHelper.*;
import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
@@ -42,14 +42,6 @@ public class TestTrash extends TestCase {
new Path(new File(System.getProperty("test.build.data","/tmp")
).toURI().toString().replace(' ', '+'), "testTrash");
- protected static Path writeFile(FileSystem fs, Path f) throws IOException {
- DataOutputStream out = fs.create(f);
- out.writeBytes("dhruba: " + f);
- out.close();
- assertTrue(fs.exists(f));
- return f;
- }
-
protected static Path mkdir(FileSystem fs, Path p) throws IOException {
assertTrue(fs.mkdirs(p));
assertTrue(fs.exists(p));
@@ -139,7 +131,7 @@ public static void trashShell(final Configuration conf, final Path base,
// Second, create a file in that directory.
Path myFile = new Path(base, "test/mkdirs/myFile");
- writeFile(fs, myFile);
+ writeFile(fs, myFile, 10);
// Verify that expunge without Trash directory
// won't throw Exception
@@ -176,7 +168,7 @@ public static void trashShell(final Configuration conf, final Path base,
}
// Verify that we can recreate the file
- writeFile(fs, myFile);
+ writeFile(fs, myFile, 10);
// Verify that we succeed in removing the file we re-created
{
@@ -194,7 +186,7 @@ public static void trashShell(final Configuration conf, final Path base,
}
// Verify that we can recreate the file
- writeFile(fs, myFile);
+ writeFile(fs, myFile, 10);
// Verify that we succeed in removing the whole directory
// along with the file inside it.
@@ -234,7 +226,7 @@ public static void trashShell(final Configuration conf, final Path base,
{
Path toErase = new Path(trashRoot, "toErase");
int retVal = -1;
- writeFile(trashRootFs, toErase);
+ writeFile(trashRootFs, toErase, 10);
try {
retVal = shell.run(new String[] {"-rm", toErase.toString()});
} catch (Exception e) {
@@ -265,7 +257,7 @@ public static void trashShell(final Configuration conf, final Path base,
// recreate directory and file
mkdir(fs, myPath);
- writeFile(fs, myFile);
+ writeFile(fs, myFile, 10);
// remove file first, then remove directory
{
@@ -316,7 +308,7 @@ public static void trashShell(final Configuration conf, final Path base,
// recreate directory and file
mkdir(fs, myPath);
- writeFile(fs, myFile);
+ writeFile(fs, myFile, 10);
// Verify that skip trash option really skips the trash for files (rm)
{
@@ -346,7 +338,7 @@ public static void trashShell(final Configuration conf, final Path base,
// recreate directory and file
mkdir(fs, myPath);
- writeFile(fs, myFile);
+ writeFile(fs, myFile, 10);
// Verify that skip trash option really skips the trash for rmr
{
@@ -392,7 +384,7 @@ public static void trashShell(final Configuration conf, final Path base,
for(int i=0;i params = request.getParameterMap();
-      SortedSet keys = new TreeSet(params.keySet());
+      SortedSet<String> keys = new TreeSet<String>(params.keySet());
for(String key: keys) {
out.print(key);
out.print(':');
@@ -101,7 +101,7 @@ public void doGet(HttpServletRequest request,
HttpServletResponse response
) throws ServletException, IOException {
PrintWriter out = response.getWriter();
-      SortedSet sortedKeys = new TreeSet();
+      SortedSet<String> sortedKeys = new TreeSet<String>();
Enumeration keys = request.getParameterNames();
while(keys.hasMoreElements()) {
sortedKeys.add(keys.nextElement());
@@ -118,7 +118,6 @@ public void doGet(HttpServletRequest request,
@SuppressWarnings("serial")
public static class HtmlContentServlet extends HttpServlet {
- @SuppressWarnings("unchecked")
@Override
public void doGet(HttpServletRequest request,
HttpServletResponse response
@@ -131,10 +130,14 @@ public void doGet(HttpServletRequest request,
}
@BeforeClass public static void setup() throws Exception {
- server = createTestServer();
+ Configuration conf = new Configuration();
+ conf.setInt(HttpServer.HTTP_MAX_THREADS, 10);
+ server = createTestServer(conf);
server.addServlet("echo", "/echo", EchoServlet.class);
server.addServlet("echomap", "/echomap", EchoMapServlet.class);
server.addServlet("htmlcontent", "/htmlcontent", HtmlContentServlet.class);
+ server.addJerseyResourcePackage(
+ JerseyResource.class.getPackage().getName(), "/jersey/*");
server.start();
baseUrl = getServerURL(server);
LOG.info("HTTP server started: "+ baseUrl);
@@ -161,7 +164,8 @@ public void run() {
assertEquals("a:b\nc:d\n",
readOutput(new URL(baseUrl, "/echo?a=b&c=d")));
int serverThreads = server.webServer.getThreadPool().getThreads();
- assertTrue(serverThreads <= MAX_THREADS);
+ assertTrue("More threads are started than expected, Server Threads count: "
+ + serverThreads, serverThreads <= MAX_THREADS);
System.out.println("Number of threads = " + serverThreads +
" which is less or equal than the max = " + MAX_THREADS);
} catch (Exception e) {
@@ -404,4 +408,18 @@ public void testRequestQuoterWithNotNull() throws Exception {
values, parameterValues));
}
+ @SuppressWarnings("unchecked")
+  private static Map<String, Object> parse(String jsonString) {
+    return (Map<String, Object>)JSON.parse(jsonString);
+ }
+
+ @Test public void testJersey() throws Exception {
+ LOG.info("BEGIN testJersey()");
+ final String js = readOutput(new URL(baseUrl, "/jersey/foo?op=bar"));
+    final Map<String, Object> m = parse(js);
+ LOG.info("m=" + m);
+ assertEquals("foo", m.get(JerseyResource.PATH));
+ assertEquals("bar", m.get(JerseyResource.OP));
+ LOG.info("END testJersey()");
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/resource/JerseyResource.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/resource/JerseyResource.java
new file mode 100644
index 0000000000..f1313e26ca
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/resource/JerseyResource.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.http.resource;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.TreeMap;
+
+import javax.ws.rs.DefaultValue;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.mortbay.util.ajax.JSON;
+
+/**
+ * A simple Jersey resource class used by TestHttpServer.
+ * The resource simply puts the path and the op parameter in a map
+ * and returns it in JSON format in the response.
+ */
+@Path("")
+public class JerseyResource {
+ static final Log LOG = LogFactory.getLog(JerseyResource.class);
+
+ public static final String PATH = "path";
+ public static final String OP = "op";
+
+ @GET
+ @Path("{" + PATH + ":.*}")
+ @Produces({MediaType.APPLICATION_JSON})
+ public Response get(
+ @PathParam(PATH) @DefaultValue("UNKNOWN_" + PATH) final String path,
+ @QueryParam(OP) @DefaultValue("UNKNOWN_" + OP) final String op
+ ) throws IOException {
+ LOG.info("get: " + PATH + "=" + path + ", " + OP + "=" + op);
+
+    final Map<String, Object> m = new TreeMap<String, Object>();
+ m.put(PATH, path);
+ m.put(OP, op);
+ final String js = JSON.toString(m);
+ return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+ }
+}
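For reference, a minimal sketch of how a resource like this is registered and what it answers with. The standalone HttpServer constructor used here is an assumption; the test above builds its server via createTestServer(conf).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.HttpServer;
    import org.apache.hadoop.http.resource.JerseyResource;

    public class JerseyResourceSketch {
      public static void main(String[] args) throws Exception {
        // Assumed constructor signature; the test uses createTestServer(conf) instead.
        HttpServer server =
            new HttpServer("test", "0.0.0.0", 0, true, new Configuration());
        server.addJerseyResourcePackage(
            JerseyResource.class.getPackage().getName(), "/jersey/*");
        server.start();
        // A GET on /jersey/foo?op=bar now answers with {"op":"bar","path":"foo"},
        // since the resource puts both values into a sorted map and emits JSON.
      }
    }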
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
index 6c3f5b1b39..51d044bda6 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
@@ -50,7 +50,7 @@ public void checkLoaded() {
}
@Before
- public void setupTestDir() throws IOException {
+ public void setupTestDir() {
FileUtil.fullyDelete(TEST_DIR);
TEST_DIR.mkdirs();
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index cf272026ed..5d04c20023 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -97,7 +97,7 @@ public TestServer(int handlerCount, boolean sleep,
}
@Override
-  public Writable call(Class<?> protocol, Writable param, long receiveTime)
+ public Writable call(String protocol, Writable param, long receiveTime)
throws IOException {
if (sleep) {
// sleep a bit
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java
index 3710198295..d4400effa7 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java
@@ -72,7 +72,7 @@ public TestServer(final int handlerCount, final boolean sleep)
}
@Override
-  public Writable call(Class<?> protocol, Writable param, long receiveTime)
+ public Writable call(String protocol, Writable param, long receiveTime)
throws IOException {
if (sleep) {
try {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
new file mode 100644
index 0000000000..203c2855bc
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
@@ -0,0 +1,255 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.junit.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
+import org.junit.Before;
+import org.junit.After;
+import org.junit.Test;
+
+public class TestMultipleProtocolServer {
+ private static final String ADDRESS = "0.0.0.0";
+ private static InetSocketAddress addr;
+ private static RPC.Server server;
+
+ private static Configuration conf = new Configuration();
+
+
+ @ProtocolInfo(protocolName="Foo")
+ interface Foo0 extends VersionedProtocol {
+ public static final long versionID = 0L;
+ String ping() throws IOException;
+
+ }
+
+ @ProtocolInfo(protocolName="Foo")
+ interface Foo1 extends VersionedProtocol {
+ public static final long versionID = 1L;
+ String ping() throws IOException;
+ String ping2() throws IOException;
+ }
+
+ @ProtocolInfo(protocolName="Foo")
+ interface FooUnimplemented extends VersionedProtocol {
+ public static final long versionID = 2L;
+ String ping() throws IOException;
+ }
+
+ interface Mixin extends VersionedProtocol{
+ public static final long versionID = 0L;
+ void hello() throws IOException;
+ }
+ interface Bar extends Mixin, VersionedProtocol {
+ public static final long versionID = 0L;
+ int echo(int i) throws IOException;
+ }
+
+
+
+ class Foo0Impl implements Foo0 {
+
+ @Override
+ public long getProtocolVersion(String protocol, long clientVersion)
+ throws IOException {
+ return Foo0.versionID;
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public ProtocolSignature getProtocolSignature(String protocol,
+ long clientVersion, int clientMethodsHash) throws IOException {
+      Class<? extends VersionedProtocol> inter;
+      try {
+        inter = (Class<? extends VersionedProtocol>)getClass().
+ getGenericInterfaces()[0];
+ } catch (Exception e) {
+ throw new IOException(e);
+ }
+ return ProtocolSignature.getProtocolSignature(clientMethodsHash,
+ getProtocolVersion(protocol, clientVersion), inter);
+ }
+
+ @Override
+ public String ping() {
+ return "Foo0";
+ }
+
+ }
+
+ class Foo1Impl implements Foo1 {
+
+ @Override
+ public long getProtocolVersion(String protocol, long clientVersion)
+ throws IOException {
+ return Foo1.versionID;
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public ProtocolSignature getProtocolSignature(String protocol,
+ long clientVersion, int clientMethodsHash) throws IOException {
+      Class<? extends VersionedProtocol> inter;
+      try {
+        inter = (Class<? extends VersionedProtocol>)getClass().
+ getGenericInterfaces()[0];
+ } catch (Exception e) {
+ throw new IOException(e);
+ }
+ return ProtocolSignature.getProtocolSignature(clientMethodsHash,
+ getProtocolVersion(protocol, clientVersion), inter);
+ }
+
+ @Override
+ public String ping() {
+ return "Foo1";
+ }
+
+ @Override
+ public String ping2() {
+ return "Foo1";
+
+ }
+
+ }
+
+
+ class BarImpl implements Bar {
+
+ @Override
+ public long getProtocolVersion(String protocol, long clientVersion)
+ throws IOException {
+ return Bar.versionID;
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public ProtocolSignature getProtocolSignature(String protocol,
+ long clientVersion, int clientMethodsHash) throws IOException {
+      Class<? extends VersionedProtocol> inter;
+      try {
+        inter = (Class<? extends VersionedProtocol>)getClass().
+ getGenericInterfaces()[0];
+ } catch (Exception e) {
+ throw new IOException(e);
+ }
+ return ProtocolSignature.getProtocolSignature(clientMethodsHash,
+ getProtocolVersion(protocol, clientVersion), inter);
+ }
+
+ @Override
+ public int echo(int i) {
+ return i;
+ }
+
+ @Override
+ public void hello() {
+
+
+ }
+ }
+ @Before
+ public void setUp() throws Exception {
+ // create a server with two handlers
+ server = RPC.getServer(Foo0.class,
+ new Foo0Impl(), ADDRESS, 0, 2, false, conf, null);
+ server.addProtocol(Foo1.class, new Foo1Impl());
+ server.addProtocol(Bar.class, new BarImpl());
+ server.addProtocol(Mixin.class, new BarImpl());
+ server.start();
+ addr = NetUtils.getConnectAddress(server);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ server.stop();
+ }
+
+ @Test
+ public void test1() throws IOException {
+    ProtocolProxy<?> proxy;
+ proxy = RPC.getProtocolProxy(Foo0.class, Foo0.versionID, addr, conf);
+
+ Foo0 foo0 = (Foo0)proxy.getProxy();
+ Assert.assertEquals("Foo0", foo0.ping());
+
+
+ proxy = RPC.getProtocolProxy(Foo1.class, Foo1.versionID, addr, conf);
+
+
+ Foo1 foo1 = (Foo1)proxy.getProxy();
+ Assert.assertEquals("Foo1", foo1.ping());
+    Assert.assertEquals("Foo1", foo1.ping2());
+
+
+ proxy = RPC.getProtocolProxy(Bar.class, Foo1.versionID, addr, conf);
+
+
+ Bar bar = (Bar)proxy.getProxy();
+ Assert.assertEquals(99, bar.echo(99));
+
+ // Now test Mixin class method
+
+ Mixin mixin = bar;
+ mixin.hello();
+ }
+
+
+ // Server does not implement the FooUnimplemented version of protocol Foo.
+ // See that calls to it fail.
+ @Test(expected=IOException.class)
+ public void testNonExistingProtocol() throws IOException {
+    ProtocolProxy<?> proxy;
+ proxy = RPC.getProtocolProxy(FooUnimplemented.class,
+ FooUnimplemented.versionID, addr, conf);
+
+ FooUnimplemented foo = (FooUnimplemented)proxy.getProxy();
+ foo.ping();
+ }
+
+
+ /**
+   * getProtocolVersion of an unimplemented version should return the highest
+   * version implemented by the server. Similarly, getProtocolSignature should work.
+ * @throws IOException
+ */
+ @Test
+ public void testNonExistingProtocol2() throws IOException {
+    ProtocolProxy<?> proxy;
+ proxy = RPC.getProtocolProxy(FooUnimplemented.class,
+ FooUnimplemented.versionID, addr, conf);
+
+ FooUnimplemented foo = (FooUnimplemented)proxy.getProxy();
+ Assert.assertEquals(Foo1.versionID,
+ foo.getProtocolVersion(RPC.getProtocolName(FooUnimplemented.class),
+ FooUnimplemented.versionID));
+ foo.getProtocolSignature(RPC.getProtocolName(FooUnimplemented.class),
+ FooUnimplemented.versionID, 0);
+ }
+
+ @Test(expected=IOException.class)
+ public void testIncorrectServerCreation() throws IOException {
+ RPC.getServer(Foo1.class,
+ new Foo0Impl(), ADDRESS, 0, 2, false, conf, null);
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java
index 02ca2afe42..85e60dde9f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java
@@ -39,7 +39,7 @@
public class TestRPCCompatibility {
private static final String ADDRESS = "0.0.0.0";
private static InetSocketAddress addr;
- private static Server server;
+ private static RPC.Server server;
  private ProtocolProxy<?> proxy;
public static final Log LOG =
@@ -52,10 +52,12 @@ public interface TestProtocol0 extends VersionedProtocol {
void ping() throws IOException;
}
- public interface TestProtocol1 extends TestProtocol0 {
+ public interface TestProtocol1 extends VersionedProtocol, TestProtocol0 {
String echo(String value) throws IOException;
}
+ @ProtocolInfo(protocolName=
+ "org.apache.hadoop.ipc.TestRPCCompatibility$TestProtocol1")
public interface TestProtocol2 extends TestProtocol1 {
int echo(int value) throws IOException;
}
@@ -89,11 +91,23 @@ public ProtocolSignature getProtocolSignature(String protocol,
public static class TestImpl1 extends TestImpl0 implements TestProtocol1 {
@Override
public String echo(String value) { return value; }
+ @Override
+ public long getProtocolVersion(String protocol,
+ long clientVersion) throws IOException {
+ return TestProtocol1.versionID;
+ }
}
public static class TestImpl2 extends TestImpl1 implements TestProtocol2 {
@Override
public int echo(int value) { return value; }
+
+ @Override
+ public long getProtocolVersion(String protocol,
+ long clientVersion) throws IOException {
+ return TestProtocol2.versionID;
+ }
+
}
@After
@@ -109,8 +123,10 @@ public void tearDown() throws IOException {
@Test // old client vs new server
public void testVersion0ClientVersion1Server() throws Exception {
// create a server with two handlers
+ TestImpl1 impl = new TestImpl1();
server = RPC.getServer(TestProtocol1.class,
- new TestImpl1(), ADDRESS, 0, 2, false, conf, null);
+ impl, ADDRESS, 0, 2, false, conf, null);
+ server.addProtocol(TestProtocol0.class, impl);
server.start();
addr = NetUtils.getConnectAddress(server);
@@ -172,8 +188,10 @@ public void ping() throws IOException {
@Test // Compatible new client & old server
public void testVersion2ClientVersion1Server() throws Exception {
// create a server with two handlers
+ TestImpl1 impl = new TestImpl1();
server = RPC.getServer(TestProtocol1.class,
- new TestImpl1(), ADDRESS, 0, 2, false, conf, null);
+ impl, ADDRESS, 0, 2, false, conf, null);
+ server.addProtocol(TestProtocol0.class, impl);
server.start();
addr = NetUtils.getConnectAddress(server);
@@ -190,8 +208,10 @@ public void testVersion2ClientVersion1Server() throws Exception {
@Test // equal version client and server
public void testVersion2ClientVersion2Server() throws Exception {
// create a server with two handlers
+ TestImpl2 impl = new TestImpl2();
server = RPC.getServer(TestProtocol2.class,
- new TestImpl2(), ADDRESS, 0, 2, false, conf, null);
+ impl, ADDRESS, 0, 2, false, conf, null);
+ server.addProtocol(TestProtocol0.class, impl);
server.start();
addr = NetUtils.getConnectAddress(server);
@@ -250,14 +270,16 @@ public void testHashCode() throws Exception {
assertEquals(hash1, hash2);
}
+ @ProtocolInfo(protocolName=
+ "org.apache.hadoop.ipc.TestRPCCompatibility$TestProtocol1")
public interface TestProtocol4 extends TestProtocol2 {
- public static final long versionID = 1L;
+ public static final long versionID = 4L;
int echo(int value) throws IOException;
}
@Test
public void testVersionMismatch() throws IOException {
- server = RPC.getServer(TestProtocol2.class, new TestImpl0(), ADDRESS, 0, 2,
+ server = RPC.getServer(TestProtocol2.class, new TestImpl2(), ADDRESS, 0, 2,
false, conf, null);
server.start();
addr = NetUtils.getConnectAddress(server);
@@ -268,7 +290,8 @@ public void testVersionMismatch() throws IOException {
proxy.echo(21);
fail("The call must throw VersionMismatch exception");
} catch (IOException ex) {
- Assert.assertTrue(ex.getMessage().contains("VersionMismatch"));
+ Assert.assertTrue("Expected version mismatch but got " + ex.getMessage(),
+ ex.getMessage().contains("VersionMismatch"));
}
}
}
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java
index 9d78ba77bc..10012348b4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java
@@ -26,12 +26,17 @@
import java.net.DatagramSocket;
import java.net.SocketException;
import java.util.ArrayList;
+import java.util.HashSet;
import java.util.List;
+import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
@@ -54,6 +59,44 @@ public class TestGangliaMetrics {
"test.s1rec.S1NumOps",
"test.s1rec.S1AvgTime" };
+ @Test
+ public void testTagsForPrefix() throws Exception {
+ ConfigBuilder cb = new ConfigBuilder()
+ .add("test.sink.ganglia.tagsForPrefix.all", "*")
+ .add("test.sink.ganglia.tagsForPrefix.some", "NumActiveSinks, NumActiveSources")
+ .add("test.sink.ganglia.tagsForPrefix.none", "");
+ GangliaSink30 sink = new GangliaSink30();
+ sink.init(cb.subset("test.sink.ganglia"));
+
+    List<MetricsTag> tags = new ArrayList<MetricsTag>();
+ tags.add(new MetricsTag(MsInfo.Context, "all"));
+ tags.add(new MetricsTag(MsInfo.NumActiveSources, "foo"));
+ tags.add(new MetricsTag(MsInfo.NumActiveSinks, "bar"));
+ tags.add(new MetricsTag(MsInfo.NumAllSinks, "haa"));
+ tags.add(new MetricsTag(MsInfo.Hostname, "host"));
+    Set<AbstractMetric> metrics = new HashSet<AbstractMetric>();
+ MetricsRecord record = new MetricsRecordImpl(MsInfo.Context, (long) 1, tags, metrics);
+
+ StringBuilder sb = new StringBuilder();
+ sink.appendPrefix(record, sb);
+ assertEquals(".NumActiveSources=foo.NumActiveSinks=bar.NumAllSinks=haa", sb.toString());
+
+ tags.set(0, new MetricsTag(MsInfo.Context, "some"));
+ sb = new StringBuilder();
+ sink.appendPrefix(record, sb);
+ assertEquals(".NumActiveSources=foo.NumActiveSinks=bar", sb.toString());
+
+ tags.set(0, new MetricsTag(MsInfo.Context, "none"));
+ sb = new StringBuilder();
+ sink.appendPrefix(record, sb);
+ assertEquals("", sb.toString());
+
+ tags.set(0, new MetricsTag(MsInfo.Context, "nada"));
+ sb = new StringBuilder();
+ sink.appendPrefix(record, sb);
+ assertEquals("", sb.toString());
+ }
+
@Test public void testGangliaMetrics2() throws Exception {
ConfigBuilder cb = new ConfigBuilder().add("default.period", 10)
.add("test.sink.gsink30.context", "test") // filter out only "test"
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java
index a820cd49b3..7a21e4c6b8 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java
@@ -18,7 +18,7 @@
import junit.framework.TestCase;
-import org.apache.hadoop.alfredo.server.AuthenticationFilter;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.mockito.Mockito;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
index 3506b5de71..0cec473c52 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
@@ -49,8 +49,7 @@ protected void setUp()
}
@After
- protected void tearDown()
- throws Exception {
+ protected void tearDown() {
FileUtil.fullyDelete(TEST_ROOT_DIR);
}
diff --git a/hadoop-common-project/pom.xml b/hadoop-common-project/pom.xml
index 552b3c76c6..ac196188a7 100644
--- a/hadoop-common-project/pom.xml
+++ b/hadoop-common-project/pom.xml
@@ -29,6 +29,7 @@
    <module>hadoop-auth</module>
+    <module>hadoop-auth-examples</module>
    <module>hadoop-common</module>
    <module>hadoop-annotations</module>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a33030c5e5..ea81b8034e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -5,9 +5,31 @@ Trunk (unreleased changes)
HDFS-395. DFS Scalability: Incremental block reports. (Tomasz Nykiel
via hairong)
+ IMPROVEMENTS
+
+  HADOOP-7524. Change RPC to allow multiple protocols including multiple
+  versions of the same protocol. (sanjay Radia)
+
+ HDFS-1620. Rename HdfsConstants -> HdfsServerConstants, FSConstants ->
+ HdfsConstants. (Harsh J Chouraria via atm)
+
+  HDFS-2197. Refactor RPC call implementations out of NameNode class (todd)
+
+ HDFS-2018. Move all journal stream management code into one place.
+ (Ivan Kelly via jitendra)
+
+  HDFS-2223. Untangle dependencies between NN components (todd)
+
BUG FIXES
HDFS-2287. TestParallelRead has a small off-by-one bug. (todd)
+ HDFS-2299. TestOfflineEditsViewer is failing on trunk. (Uma Maheswara Rao G
+ via atm)
+ HDFS-2310. TestBackupNode fails since HADOOP-7524 went in.
+ (Ivan Kelly via todd)
+
+ HDFS-2313. Rat excludes has a typo for excluding editsStored files. (atm)
+
+ HDFS-2314. MRV1 test compilation broken after HDFS-2197 (todd)
+
Release 0.23.0 - Unreleased
INCOMPATIBLE CHANGES
@@ -687,6 +709,9 @@ Release 0.23.0 - Unreleased
HDFS-2266. Add Namesystem and SafeMode interfaces to avoid directly
referring to FSNamesystem in BlockManager. (szetszwo)
+ HDFS-1217. Change some NameNode methods from public to package private.
+ (Laxman via szetszwo)
+
OPTIMIZATIONS
HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
@@ -1003,6 +1028,9 @@ Release 0.23.0 - Unreleased
HDFS-2286. DataXceiverServer logs AsynchronousCloseException at shutdown
(todd)
+ HDFS-2289. Ensure jsvc is bundled with the HDFS distribution artifact.
+ (Alejandro Abdelnur via acmurthy)
+
BREAKDOWN OF HDFS-1073 SUBTASKS
HDFS-1521. Persist transaction ID on disk between NN restarts.
@@ -1086,6 +1114,7 @@ Release 0.22.0 - Unreleased
(jghoman)
HDFS-1330. Make RPCs to DataNodes timeout. (hairong)
+ Added additional unit tests per HADOOP-6889. (John George via mattf)
HDFS-202. HDFS support of listLocatedStatus introduced in HADOOP-6870.
HDFS piggyback block locations to each file status when listing a
@@ -1541,6 +1570,11 @@ Release 0.22.0 - Unreleased
HDFS-1981. NameNode does not saveNamespace() when editsNew is empty.
(Uma Maheswara Rao G via shv)
+ HDFS-2258. Reset lease limits to default values in TestLeaseRecovery2. (shv)
+
+ HDFS-2232. Generalize regular expressions in TestHDFSCLI.
+ (Plamen Jeliazkov via shv)
+
Release 0.21.1 - Unreleased
HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index c49c5151ad..9cdab097f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -296,7 +296,7 @@
        <exclude>src/test/all-tests</exclude>
        <exclude>src/test/resources/*.tgz</exclude>
        <exclude>src/test/resources/data*</exclude>
-        <exclude>src/test/resources/editStored*</exclude>
+        <exclude>src/test/resources/editsStored*</exclude>
        <exclude>src/test/resources/empty-file</exclude>
        <exclude>src/main/webapps/datanode/robots.txt</exclude>
        <exclude>src/main/docs/releasenotes.html</exclude>
@@ -304,6 +304,56 @@
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>xprepare-package-hadoop-daemon</id>
+            <phase>prepare-package</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 8e8b1cfafc..ece940bdd9 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -118,7 +118,7 @@ if [ "$starting_secure_dn" = "true" ]; then
HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
fi
- exec "$HADOOP_HDFS_HOME/bin/jsvc" \
+ exec "$HADOOP_HDFS_HOME/libexec/jsvc" \
-Dproc_$COMMAND -outfile "$HADOOP_LOG_DIR/jsvc.out" \
-errfile "$HADOOP_LOG_DIR/jsvc.err" \
-pidfile "$HADOOP_SECURE_DN_PID" \
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml
index 2bfa2e0bbd..0d3ed89c7f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml
@@ -505,7 +505,7 @@
using 'bin/hadoop dfsadmin -safemode' command. NameNode front
page shows whether Safemode is on or off. A more detailed
description and configuration is maintained as JavaDoc for
- setSafeMode().
+ setSafeMode().
fsck
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
index 642f60be8b..7772ad9792 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
@@ -37,7 +37,7 @@
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -70,9 +70,9 @@ public class Hdfs extends AbstractFileSystem {
* @throws IOException
*/
Hdfs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException {
- super(theUri, FSConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT);
+ super(theUri, HdfsConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT);
- if (!theUri.getScheme().equalsIgnoreCase(FSConstants.HDFS_URI_SCHEME)) {
+ if (!theUri.getScheme().equalsIgnoreCase(HdfsConstants.HDFS_URI_SCHEME)) {
throw new IllegalArgumentException("Passed URI's scheme is not for Hdfs");
}
String host = theUri.getHost();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 237372377e..85639afc1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -60,10 +60,10 @@
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -77,7 +77,7 @@
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -156,14 +156,14 @@ static class Conf {
DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
confTime = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
- HdfsConstants.WRITE_TIMEOUT);
+ HdfsServerConstants.WRITE_TIMEOUT);
ioBufferSize = conf.getInt(
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
DFS_BYTES_PER_CHECKSUM_DEFAULT);
socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
- HdfsConstants.READ_TIMEOUT);
+ HdfsServerConstants.READ_TIMEOUT);
/** dfs.write.packet.size is an internal config variable */
writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
@@ -279,12 +279,12 @@ int getMaxBlockAcquireFailures() {
*/
int getDatanodeWriteTimeout(int numNodes) {
return (dfsClientConf.confTime > 0) ?
- (dfsClientConf.confTime + HdfsConstants.WRITE_TIMEOUT_EXTENSION * numNodes) : 0;
+ (dfsClientConf.confTime + HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * numNodes) : 0;
}
int getDatanodeReadTimeout(int numNodes) {
return dfsClientConf.socketTimeout > 0 ?
- (HdfsConstants.READ_TIMEOUT_EXTENSION * numNodes +
+ (HdfsServerConstants.READ_TIMEOUT_EXTENSION * numNodes +
dfsClientConf.socketTimeout) : 0;
}
@@ -1046,7 +1046,7 @@ public static MD5MD5CRC32FileChecksum getFileChecksum(String src,
out = new DataOutputStream(
new BufferedOutputStream(NetUtils.getOutputStream(sock),
- FSConstants.SMALL_BUFFER_SIZE));
+ HdfsConstants.SMALL_BUFFER_SIZE));
in = new DataInputStream(NetUtils.getInputStream(sock));
if (LOG.isDebugEnabled()) {
@@ -1225,7 +1225,7 @@ public DatanodeInfo[] datanodeReport(DatanodeReportType type)
/**
* Enter, leave or get safe mode.
*
- * @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction)
+ * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction)
*/
public boolean setSafeMode(SafeModeAction action) throws IOException {
return namenode.setSafeMode(action);
@@ -1293,7 +1293,7 @@ public void finalizeUpgrade() throws IOException {
}
/**
- * @see ClientProtocol#distributedUpgradeProgress(FSConstants.UpgradeAction)
+ * @see ClientProtocol#distributedUpgradeProgress(HdfsConstants.UpgradeAction)
*/
public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
throws IOException {
@@ -1392,10 +1392,10 @@ ContentSummary getContentSummary(String src) throws IOException {
void setQuota(String src, long namespaceQuota, long diskspaceQuota)
throws IOException {
// sanity check
- if ((namespaceQuota <= 0 && namespaceQuota != FSConstants.QUOTA_DONT_SET &&
- namespaceQuota != FSConstants.QUOTA_RESET) ||
- (diskspaceQuota <= 0 && diskspaceQuota != FSConstants.QUOTA_DONT_SET &&
- diskspaceQuota != FSConstants.QUOTA_RESET)) {
+ if ((namespaceQuota <= 0 && namespaceQuota != HdfsConstants.QUOTA_DONT_SET &&
+ namespaceQuota != HdfsConstants.QUOTA_RESET) ||
+ (diskspaceQuota <= 0 && diskspaceQuota != HdfsConstants.QUOTA_DONT_SET &&
+ diskspaceQuota != HdfsConstants.QUOTA_RESET)) {
throw new IllegalArgumentException("Invalid values for quota : " +
namespaceQuota + " and " +
diskspaceQuota);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 03879338dd..c330297cd3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -47,7 +47,7 @@
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -166,7 +166,7 @@ private class Packet {
this.seqno = HEART_BEAT_SEQNO;
buffer = null;
- int packetSize = PacketHeader.PKT_HEADER_LEN + FSConstants.BYTES_IN_INTEGER;
+ int packetSize = PacketHeader.PKT_HEADER_LEN + HdfsConstants.BYTES_IN_INTEGER;
buf = new byte[packetSize];
checksumStart = dataStart = packetSize;
@@ -234,12 +234,12 @@ ByteBuffer getBuffer() {
dataStart - checksumLen , checksumLen);
}
- int pktLen = FSConstants.BYTES_IN_INTEGER + dataLen + checksumLen;
+ int pktLen = HdfsConstants.BYTES_IN_INTEGER + dataLen + checksumLen;
//normally dataStart == checksumPos, i.e., offset is zero.
buffer = ByteBuffer.wrap(
buf, dataStart - checksumPos,
- PacketHeader.PKT_HEADER_LEN + pktLen - FSConstants.BYTES_IN_INTEGER);
+ PacketHeader.PKT_HEADER_LEN + pktLen - HdfsConstants.BYTES_IN_INTEGER);
buf = null;
buffer.mark();
@@ -849,7 +849,7 @@ private void transfer(final DatanodeInfo src, final DatanodeInfo[] targets,
final long writeTimeout = dfsClient.getDatanodeWriteTimeout(2);
out = new DataOutputStream(new BufferedOutputStream(
NetUtils.getOutputStream(sock, writeTimeout),
- FSConstants.SMALL_BUFFER_SIZE));
+ HdfsConstants.SMALL_BUFFER_SIZE));
//send the TRANSFER_BLOCK request
new Sender(out).transferBlock(block, blockToken, dfsClient.clientName,
@@ -1023,7 +1023,7 @@ private boolean createBlockOutputStream(DatanodeInfo[] nodes, long newGS,
//
out = new DataOutputStream(new BufferedOutputStream(
NetUtils.getOutputStream(s, writeTimeout),
- FSConstants.SMALL_BUFFER_SIZE));
+ HdfsConstants.SMALL_BUFFER_SIZE));
assert null == blockReplyStream : "Previous blockReplyStream unclosed";
blockReplyStream = new DataInputStream(NetUtils.getInputStream(s));
@@ -1173,7 +1173,7 @@ static Socket createSocketForPipeline(final DatanodeInfo first,
final int timeout = client.getDatanodeReadTimeout(length);
NetUtils.connect(sock, isa, timeout);
sock.setSoTimeout(timeout);
- sock.setSendBufferSize(FSConstants.DEFAULT_DATA_SOCKET_SIZE);
+ sock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
if(DFSClient.LOG.isDebugEnabled()) {
DFSClient.LOG.debug("Send buf size " + sock.getSendBufferSize());
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 5d32c7a05e..e884f1c63f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -48,7 +48,7 @@
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -646,7 +646,7 @@ static ClientProtocol createRPCNamenode(InetSocketAddress nameNodeAddr,
static ClientProtocol createNamenode(ClientProtocol rpcNamenode)
throws IOException {
RetryPolicy createPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
- 5, FSConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
+ 5, HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
    Map<Class<? extends Exception>,RetryPolicy> remoteExceptionToPolicyMap =
        new HashMap<Class<? extends Exception>, RetryPolicy>();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index ef5ad425c9..68f8616941 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -49,9 +49,9 @@
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -108,7 +108,7 @@ public void initialize(URI uri, Configuration conf) throws IOException {
InetSocketAddress namenode = NameNode.getAddress(uri.getAuthority());
this.dfs = new DFSClient(namenode, conf, statistics);
- this.uri = URI.create(FSConstants.HDFS_URI_SCHEME + "://" + uri.getAuthority());
+ this.uri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + uri.getAuthority());
this.workingDir = getHomeDirectory();
}
@@ -642,9 +642,9 @@ public DatanodeInfo[] getDataNodeStats(final DatanodeReportType type
* Enter, leave or get safe mode.
*
* @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(
- * FSConstants.SafeModeAction)
+ * HdfsConstants.SafeModeAction)
*/
- public boolean setSafeMode(FSConstants.SafeModeAction action)
+ public boolean setSafeMode(HdfsConstants.SafeModeAction action)
throws IOException {
return dfs.setSafeMode(action);
}
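For reference, a minimal client-side sketch of the renamed safe-mode API shown above; the namenode URI is hypothetical.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;

    public class SafeModeSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical namenode address.
        FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        // SAFEMODE_GET only queries the current state; ENTER/LEAVE change it.
        boolean inSafeMode =
            dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET);
        System.out.println("NameNode in safe mode: " + inSafeMode);
      }
    }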
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
index ba26ad2c24..35d45bac32 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
@@ -30,7 +30,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.StringUtils;
@@ -162,7 +162,7 @@ private synchronized void remove(final LeaseRenewer r) {
/** The time in milliseconds that the map became empty. */
private long emptyTime = Long.MAX_VALUE;
/** A fixed lease renewal time period in milliseconds */
- private long renewal = FSConstants.LEASE_SOFTLIMIT_PERIOD/2;
+ private long renewal = HdfsConstants.LEASE_SOFTLIMIT_PERIOD/2;
/** A daemon for renewing lease */
private Daemon daemon = null;
@@ -352,7 +352,7 @@ synchronized void closeClient(final DFSClient dfsc) {
//update renewal time
if (renewal == dfsc.getHdfsTimeout()/2) {
- long min = FSConstants.LEASE_SOFTLIMIT_PERIOD;
+ long min = HdfsConstants.LEASE_SOFTLIMIT_PERIOD;
for(DFSClient c : dfsclients) {
if (c.getHdfsTimeout() > 0) {
final long timeout = c.getHdfsTimeout();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
index 51311f5216..0be0bb9fb9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
@@ -40,7 +40,7 @@
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
@@ -394,7 +394,7 @@ public static RemoteBlockReader newBlockReader( Socket sock, String file,
throws IOException {
// in and out will be closed when sock is closed (by the caller)
final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
- NetUtils.getOutputStream(sock, HdfsConstants.WRITE_TIMEOUT)));
+ NetUtils.getOutputStream(sock, HdfsServerConstants.WRITE_TIMEOUT)));
new Sender(out).readBlock(block, blockToken, clientName, startOffset, len);
//
@@ -486,7 +486,7 @@ public boolean hasSentStatusCode() {
void sendReadResult(Socket sock, Status statusCode) {
assert !sentStatusCode : "already sent status code to " + sock;
try {
- OutputStream out = NetUtils.getOutputStream(sock, HdfsConstants.WRITE_TIMEOUT);
+ OutputStream out = NetUtils.getOutputStream(sock, HdfsServerConstants.WRITE_TIMEOUT);
ClientReadStatusProto.newBuilder()
.setStatus(statusCode)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
index 165096f24b..e1006a65d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
@@ -22,7 +22,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 4153110ca9..e2ecbaa46d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -35,7 +35,7 @@
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
@@ -578,7 +578,7 @@ public void renewLease(String clientName) throws AccessControlException,
* Return live datanodes if type is LIVE; dead datanodes if type is DEAD;
* otherwise all datanodes if type is ALL.
*/
- public DatanodeInfo[] getDatanodeReport(FSConstants.DatanodeReportType type)
+ public DatanodeInfo[] getDatanodeReport(HdfsConstants.DatanodeReportType type)
throws IOException;
/**
@@ -601,7 +601,7 @@ public long getPreferredBlockSize(String filename)
*
* Safe mode is entered automatically at name node startup.
* Safe mode can also be entered manually using
- * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}.
+ * {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}.
*
* At startup the name node accepts data node reports collecting
* information about block locations.
@@ -617,11 +617,11 @@ public long getPreferredBlockSize(String filename)
* Then the name node leaves safe mode.
*
* If safe mode is turned on manually using
- * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}
+ * {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}
* then the name node stays in safe mode until it is manually turned off
- * using {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}.
+ * using {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}.
* Current state of the name node can be verified using
- * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}
+ * {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}
*
   * Configuration parameters:
* dfs.safemode.threshold.pct is the threshold parameter.
* dfs.safemode.extension is the safe mode extension parameter.
@@ -644,7 +644,7 @@ public long getPreferredBlockSize(String filename)
*
* @throws IOException
*/
- public boolean setSafeMode(FSConstants.SafeModeAction action)
+ public boolean setSafeMode(HdfsConstants.SafeModeAction action)
throws IOException;
/**
@@ -685,7 +685,7 @@ public boolean setSafeMode(FSConstants.SafeModeAction action)
/**
* Report distributed upgrade progress or force current upgrade to proceed.
*
- * @param action {@link FSConstants.UpgradeAction} to perform
+ * @param action {@link HdfsConstants.UpgradeAction} to perform
* @return upgrade status information or null if no upgrades are in progress
* @throws IOException
*/
@@ -777,8 +777,8 @@ public ContentSummary getContentSummary(String path)
*
*
* The quota can have three types of values : (1) 0 or more will set
- * the quota to that value, (2) {@link FSConstants#QUOTA_DONT_SET} implies
- * the quota will not be changed, and (3) {@link FSConstants#QUOTA_RESET}
+ * the quota to that value, (2) {@link HdfsConstants#QUOTA_DONT_SET} implies
+ * the quota will not be changed, and (3) {@link HdfsConstants#QUOTA_RESET}
* implies the quota will be reset. Any other value is a runtime error.
*
* @throws AccessControlException permission denied
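The safe mode and datanode-report Javadoc above is written against the renamed enums in org.apache.hadoop.hdfs.protocol.HdfsConstants. The following sketch is not part of the patch; the class name SafeModeProbe and the way the ClientProtocol proxy is obtained are assumptions, and it only illustrates what a caller looks like after the rename:

    import java.io.IOException;

    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;

    class SafeModeProbe {
      // 'namenode' is any ClientProtocol proxy obtained elsewhere; how it is
      // created is outside the scope of this patch.
      static void probe(ClientProtocol namenode) throws IOException {
        // Query the current safe mode state without changing it.
        boolean inSafeMode =
            namenode.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET);

        // List only the live datanodes, using the report-type enum that moved
        // from FSConstants to HdfsConstants.
        DatanodeInfo[] live =
            namenode.getDatanodeReport(HdfsConstants.DatanodeReportType.LIVE);

        System.out.println("safe mode: " + inSafeMode
            + ", live datanodes: " + live.length);
      }
    }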
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FSConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
similarity index 98%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index f023d73c93..4a456c94f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -26,9 +26,9 @@
*
************************************/
@InterfaceAudience.Private
-public final class FSConstants {
+public final class HdfsConstants {
/* Hidden constructor */
- private FSConstants() {
+ private HdfsConstants() {
}
public static int MIN_BLOCKS_FOR_WRITE = 5;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index 260cd7600b..2f224409f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -55,15 +55,15 @@
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
@@ -306,13 +306,13 @@ private void dispatch() {
DataInputStream in = null;
try {
sock.connect(NetUtils.createSocketAddr(
- target.datanode.getName()), HdfsConstants.READ_TIMEOUT);
+ target.datanode.getName()), HdfsServerConstants.READ_TIMEOUT);
sock.setKeepAlive(true);
out = new DataOutputStream( new BufferedOutputStream(
- sock.getOutputStream(), FSConstants.IO_FILE_BUFFER_SIZE));
+ sock.getOutputStream(), HdfsConstants.IO_FILE_BUFFER_SIZE));
sendRequest(out);
in = new DataInputStream( new BufferedInputStream(
- sock.getInputStream(), FSConstants.IO_FILE_BUFFER_SIZE));
+ sock.getInputStream(), HdfsConstants.IO_FILE_BUFFER_SIZE));
receiveResponse(in);
bytesMoved.inc(block.getNumBytes());
LOG.info( "Moving block " + block.getBlock().getBlockId() +
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index e7c160af25..293d5c5969 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs.server.blockmanagement;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.util.LightWeightGSet;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
index c3f676e3d2..29565ace47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
@@ -22,8 +22,8 @@
import java.util.List;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 4e45449b1f..682d272922 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -50,8 +50,8 @@
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 1b483a7537..6455b579a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -30,7 +30,7 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
@@ -439,7 +439,7 @@ private boolean isGoodTarget(DatanodeDescriptor node,
long remaining = node.getRemaining() -
(node.getBlocksScheduled() * blockSize);
// check the remaining capacity of the target machine
- if (blockSize* FSConstants.MIN_BLOCKS_FOR_WRITE>remaining) {
+ if (blockSize* HdfsConstants.MIN_BLOCKS_FOR_WRITE>remaining) {
if(LOG.isDebugEnabled()) {
threadLocalBuilder.get().append(node.toString()).append(": ")
.append("Node ").append(NodeBase.getPath(node))
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index a069d761f3..e0c2de955a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -45,7 +45,7 @@
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
similarity index 98%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index ef83ad2db8..256e5d663e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -29,9 +29,9 @@
************************************/
@InterfaceAudience.Private
-public final class HdfsConstants {
+public final class HdfsServerConstants {
/* Hidden constructor */
- private HdfsConstants() { }
+ private HdfsServerConstants() { }
/**
* Type of the node
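After this patch there are two distinct constants classes: the client-facing org.apache.hadoop.hdfs.protocol.HdfsConstants (formerly FSConstants) and the server-side org.apache.hadoop.hdfs.server.common.HdfsServerConstants (formerly the server HdfsConstants). That is why files such as Balancer above, and DataNode and DataXceiver below, now import both. A minimal sketch of the resulting import pattern; the class and method names here are illustrative, not from the patch:

    import java.io.BufferedOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.Socket;

    import org.apache.hadoop.hdfs.protocol.HdfsConstants;            // was FSConstants
    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; // was server-side HdfsConstants

    class ConstantsAfterRename {
      // Mirrors the pattern in the Balancer and DataNode hunks of this patch.
      static DataOutputStream openTo(InetSocketAddress addr) throws IOException {
        Socket sock = new Socket();
        // Server-side socket timeout now comes from HdfsServerConstants.
        sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
        // Protocol-level buffer size now comes from HdfsConstants.
        return new DataOutputStream(new BufferedOutputStream(
            sock.getOutputStream(), HdfsConstants.IO_FILE_BUFFER_SIZE));
      }
    }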
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java
index 990b089fd7..5f0b2604b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java
@@ -21,7 +21,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
/**
* The exception is thrown when external version does not match
@@ -34,7 +34,7 @@ public class IncorrectVersionException extends IOException {
private static final long serialVersionUID = 1L;
public IncorrectVersionException(int versionReported, String ofWhat) {
- this(versionReported, ofWhat, FSConstants.LAYOUT_VERSION);
+ this(versionReported, ofWhat, HdfsConstants.LAYOUT_VERSION);
}
public IncorrectVersionException(int versionReported,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
index 81f182d6ca..6e220d6bd2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
@@ -166,8 +166,8 @@ public static DatanodeInfo bestNode(DatanodeInfo[] nodes, boolean doRandom)
try {
s = new Socket();
- s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
- s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
+ s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
+ s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
} catch (IOException e) {
deadNodes.add(chosenNode);
s.close();
@@ -188,8 +188,8 @@ public static void streamBlockInAscii(InetSocketAddress addr, String poolId,
JspWriter out, Configuration conf) throws IOException {
if (chunkSizeToView == 0) return;
Socket s = new Socket();
- s.connect(addr, HdfsConstants.READ_TIMEOUT);
- s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
+ s.connect(addr, HdfsServerConstants.READ_TIMEOUT);
+ s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
long amtToRead = Math.min(chunkSizeToView, blockSize - offsetIntoBlock);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 19ad35bb9a..4c11973d4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -32,11 +32,11 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.util.VersionInfo;
@@ -434,10 +434,10 @@ public StorageState analyzeStorage(StartupOption startOpt, Storage storage)
this.lock(); // lock storage if it exists
- if (startOpt == HdfsConstants.StartupOption.FORMAT)
+ if (startOpt == HdfsServerConstants.StartupOption.FORMAT)
return StorageState.NOT_FORMATTED;
- if (startOpt != HdfsConstants.StartupOption.IMPORT) {
+ if (startOpt != HdfsServerConstants.StartupOption.IMPORT) {
storage.checkOldLayoutStorage(this);
}
@@ -866,7 +866,7 @@ public static void deleteDir(File dir) throws IOException {
* @throws IOException
*/
public void writeAll() throws IOException {
- this.layoutVersion = FSConstants.LAYOUT_VERSION;
+ this.layoutVersion = HdfsConstants.LAYOUT_VERSION;
for (Iterator<StorageDirectory> it = storageDirs.iterator(); it.hasNext();) {
writeProperties(it.next());
}
@@ -938,7 +938,7 @@ protected void setClusterId(Properties props, int layoutVersion,
protected void setLayoutVersion(Properties props, StorageDirectory sd)
throws IncorrectVersionException, InconsistentFSStateException {
int lv = Integer.parseInt(getProperty(props, sd, "layoutVersion"));
- if (lv < FSConstants.LAYOUT_VERSION) { // future version
+ if (lv < HdfsConstants.LAYOUT_VERSION) { // future version
throw new IncorrectVersionException(lv, "storage directory "
+ sd.root.getAbsolutePath());
}
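The "future version" check above relies on HDFS layout versions being negative integers that decrease as the on-disk format evolves, so a stored value smaller than HdfsConstants.LAYOUT_VERSION means the directory was written by newer software. A small illustration with made-up numbers (the real constants differ by release):

    class LayoutVersionCheck {
      // Hypothetical value for illustration; stands in for HdfsConstants.LAYOUT_VERSION.
      static final int SOFTWARE_LV = -38;

      static boolean isFutureVersion(int storedLV) {
        // Layout versions are negative and decrease over time, so a stored value
        // smaller than the software's version means the on-disk format is newer,
        // which is the case where Storage.setLayoutVersion() throws
        // IncorrectVersionException.
        return storedLV < SOFTWARE_LV;
      }

      public static void main(String[] args) {
        System.out.println(isFutureVersion(-40)); // true: newer than this software
        System.out.println(isFutureVersion(-35)); // false: older, upgradable layout
      }
    }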
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeManager.java
index 911dd407d4..405006bfb1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeManager.java
@@ -21,7 +21,7 @@
import java.util.SortedSet;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
/**
@@ -69,7 +69,7 @@ public synchronized boolean initializeUpgrade() throws IOException {
currentUpgrades = getDistributedUpgrades();
if(currentUpgrades == null) {
// set new upgrade state
- setUpgradeState(false, FSConstants.LAYOUT_VERSION);
+ setUpgradeState(false, HdfsConstants.LAYOUT_VERSION);
return false;
}
Upgradeable curUO = currentUpgrades.first();
@@ -85,7 +85,7 @@ public synchronized boolean isUpgradeCompleted() {
return false;
}
- public abstract HdfsConstants.NodeType getType();
+ public abstract HdfsServerConstants.NodeType getType();
public abstract boolean startUpgrade() throws IOException;
public abstract void completeUpgrade() throws IOException;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java
index c2558bed32..b59ef965d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java
@@ -22,7 +22,7 @@
import java.util.TreeSet;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.util.StringUtils;
/**
@@ -40,7 +40,7 @@ public class UpgradeObjectCollection {
static class UOSignature implements Comparable<UOSignature> {
int version;
- HdfsConstants.NodeType type;
+ HdfsServerConstants.NodeType type;
String className;
UOSignature(Upgradeable uo) {
@@ -53,7 +53,7 @@ int getVersion() {
return version;
}
- HdfsConstants.NodeType getType() {
+ HdfsServerConstants.NodeType getType() {
return type;
}
@@ -111,13 +111,13 @@ static void registerUpgrade(Upgradeable uo) {
}
public static SortedSet<Upgradeable> getDistributedUpgrades(int versionFrom,
- HdfsConstants.NodeType type
+ HdfsServerConstants.NodeType type
) throws IOException {
- assert FSConstants.LAYOUT_VERSION <= versionFrom : "Incorrect version "
- + versionFrom + ". Expected to be <= " + FSConstants.LAYOUT_VERSION;
+ assert HdfsConstants.LAYOUT_VERSION <= versionFrom : "Incorrect version "
+ + versionFrom + ". Expected to be <= " + HdfsConstants.LAYOUT_VERSION;
SortedSet<Upgradeable> upgradeObjects = new TreeSet<Upgradeable>();
for(UOSignature sig : upgradeTable) {
- if(sig.getVersion() < FSConstants.LAYOUT_VERSION)
+ if(sig.getVersion() < HdfsConstants.LAYOUT_VERSION)
continue;
if(sig.getVersion() > versionFrom)
break;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Upgradeable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Upgradeable.java
index 6081c4cfc6..016fd948e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Upgradeable.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Upgradeable.java
@@ -42,7 +42,7 @@ public interface Upgradeable extends Comparable<Upgradeable> {
* Get the type of the software component, which this object is upgrading.
* @return type
*/
- HdfsConstants.NodeType getType();
+ HdfsServerConstants.NodeType getType();
/**
* Description of the upgrade object for displaying.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index b547701b85..668b45bff8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -30,14 +30,14 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.HardLink;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.util.Daemon;
@@ -89,7 +89,7 @@ public BlockPoolSliceStorage(StorageInfo storageInfo, String bpid) {
*/
void recoverTransitionRead(DataNode datanode, NamespaceInfo nsInfo,
Collection<File> dataDirs, StartupOption startOpt) throws IOException {
- assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion()
+ assert HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion()
: "Block-pool and name-node layout versions must be the same.";
// 1. For each BP data directory analyze the state and
@@ -171,7 +171,7 @@ private void format(StorageDirectory bpSdir, NamespaceInfo nsInfo) throws IOExce
LOG.info("Formatting block pool " + blockpoolID + " directory "
+ bpSdir.getCurrentDir());
bpSdir.clearDirectory(); // create directory
- this.layoutVersion = FSConstants.LAYOUT_VERSION;
+ this.layoutVersion = HdfsConstants.LAYOUT_VERSION;
this.cTime = nsInfo.getCTime();
this.namespaceID = nsInfo.getNamespaceID();
this.blockpoolID = nsInfo.getBlockPoolID();
@@ -239,7 +239,7 @@ private void doTransition(DataNode datanode, StorageDirectory sd,
readProperties(sd);
checkVersionUpgradable(this.layoutVersion);
- assert this.layoutVersion >= FSConstants.LAYOUT_VERSION
+ assert this.layoutVersion >= HdfsConstants.LAYOUT_VERSION
: "Future version is not allowed";
if (getNamespaceID() != nsInfo.getNamespaceID()) {
throw new IOException("Incompatible namespaceIDs in "
@@ -253,7 +253,7 @@ private void doTransition(DataNode datanode, StorageDirectory sd,
+ nsInfo.getBlockPoolID() + "; datanode blockpoolID = "
+ blockpoolID);
}
- if (this.layoutVersion == FSConstants.LAYOUT_VERSION
+ if (this.layoutVersion == HdfsConstants.LAYOUT_VERSION
&& this.cTime == nsInfo.getCTime())
return; // regular startup
@@ -261,7 +261,7 @@ private void doTransition(DataNode datanode, StorageDirectory sd,
UpgradeManagerDatanode um =
datanode.getUpgradeManagerDatanode(nsInfo.getBlockPoolID());
verifyDistributedUpgradeProgress(um, nsInfo);
- if (this.layoutVersion > FSConstants.LAYOUT_VERSION
+ if (this.layoutVersion > HdfsConstants.LAYOUT_VERSION
|| this.cTime < nsInfo.getCTime()) {
doUpgrade(sd, nsInfo); // upgrade
return;
@@ -327,7 +327,7 @@ void doUpgrade(StorageDirectory bpSd, NamespaceInfo nsInfo) throws IOException {
// 3. Create new /current with block files hardlinks and VERSION
linkAllBlocks(bpTmpDir, bpCurDir);
- this.layoutVersion = FSConstants.LAYOUT_VERSION;
+ this.layoutVersion = HdfsConstants.LAYOUT_VERSION;
assert this.namespaceID == nsInfo.getNamespaceID()
: "Data-node and name-node layout versions must be the same.";
this.cTime = nsInfo.getCTime();
@@ -389,7 +389,7 @@ void doRollback(StorageDirectory bpSd, NamespaceInfo nsInfo)
// the namespace state or can be further upgraded to it.
// In another word, we can only roll back when ( storedLV >= software LV)
// && ( DN.previousCTime <= NN.ctime)
- if (!(prevInfo.getLayoutVersion() >= FSConstants.LAYOUT_VERSION &&
+ if (!(prevInfo.getLayoutVersion() >= HdfsConstants.LAYOUT_VERSION &&
prevInfo.getCTime() <= nsInfo.getCTime())) { // cannot rollback
throw new InconsistentFSStateException(bpSd.getRoot(),
"Cannot rollback to a newer state.\nDatanode previous state: LV = "
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index b51241ed3f..50e118aaa0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -36,7 +36,7 @@
import org.apache.hadoop.fs.FSOutputSummer;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
@@ -179,7 +179,7 @@ class BlockReceiver implements Closeable {
this.out = streams.dataOut;
this.cout = streams.checksumOut;
this.checksumOut = new DataOutputStream(new BufferedOutputStream(
- streams.checksumOut, FSConstants.SMALL_BUFFER_SIZE));
+ streams.checksumOut, HdfsConstants.SMALL_BUFFER_SIZE));
// write data chunk header if creating a new replica
if (isCreate) {
BlockMetadataHeader.writeHeader(checksumOut, checksum);
@@ -398,7 +398,7 @@ private void readNextPacket() throws IOException {
buf.limit(bufRead);
}
- while (buf.remaining() < FSConstants.BYTES_IN_INTEGER) {
+ while (buf.remaining() < HdfsConstants.BYTES_IN_INTEGER) {
if (buf.position() > 0) {
shiftBufData();
}
@@ -420,7 +420,7 @@ private void readNextPacket() throws IOException {
// Subtract BYTES_IN_INTEGER since that accounts for the payloadLen that
// we read above.
int pktSize = payloadLen + PacketHeader.PKT_HEADER_LEN
- - FSConstants.BYTES_IN_INTEGER;
+ - HdfsConstants.BYTES_IN_INTEGER;
if (buf.remaining() < pktSize) {
//we need to read more data
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index ac194622a3..b9e3858f3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -32,7 +32,7 @@
import org.apache.commons.logging.Log;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
@@ -155,7 +155,7 @@ class BlockSender implements java.io.Closeable {
if ( !corruptChecksumOk || datanode.data.metaFileExists(block) ) {
checksumIn = new DataInputStream(new BufferedInputStream(datanode.data
- .getMetaDataInputStream(block), FSConstants.IO_FILE_BUFFER_SIZE));
+ .getMetaDataInputStream(block), HdfsConstants.IO_FILE_BUFFER_SIZE));
// read and handle the common header here. For now just a version
BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
@@ -472,14 +472,14 @@ long sendBlock(DataOutputStream out, OutputStream baseStream,
streamForSendChunks = baseStream;
// assure a mininum buffer size.
- maxChunksPerPacket = (Math.max(FSConstants.IO_FILE_BUFFER_SIZE,
+ maxChunksPerPacket = (Math.max(HdfsConstants.IO_FILE_BUFFER_SIZE,
MIN_BUFFER_WITH_TRANSFERTO)
+ bytesPerChecksum - 1)/bytesPerChecksum;
// allocate smaller buffer while using transferTo().
pktSize += checksumSize * maxChunksPerPacket;
} else {
- maxChunksPerPacket = Math.max(1, (FSConstants.IO_FILE_BUFFER_SIZE
+ maxChunksPerPacket = Math.max(1, (HdfsConstants.IO_FILE_BUFFER_SIZE
+ bytesPerChecksum - 1) / bytesPerChecksum);
pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index a9c29cc821..edc57fd797 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -68,7 +68,7 @@
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
@@ -83,9 +83,9 @@
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.Storage;
@@ -438,9 +438,9 @@ private static String getHostName(Configuration config)
private void initConfig(Configuration conf) {
this.socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
- HdfsConstants.READ_TIMEOUT);
+ HdfsServerConstants.READ_TIMEOUT);
this.socketWriteTimeout = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
- HdfsConstants.WRITE_TIMEOUT);
+ HdfsServerConstants.WRITE_TIMEOUT);
/* Based on results on different platforms, we might need set the default
* to false on some of them. */
this.transferToAllowed = conf.getBoolean(
@@ -623,7 +623,7 @@ private void initDataXceiver(Configuration conf) throws IOException {
} else {
ss = secureResources.getStreamingSocket();
}
- ss.setReceiveBufferSize(FSConstants.DEFAULT_DATA_SOCKET_SIZE);
+ ss.setReceiveBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
// adjust machine name with the actual port
int tmpPort = ss.getLocalPort();
selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(),
@@ -768,9 +768,9 @@ private NamespaceInfo handshake() throws IOException {
} catch (InterruptedException ie) {}
}
- assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
+ assert HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
"Data-node and name-node layout versions must be the same."
- + "Expected: "+ FSConstants.LAYOUT_VERSION
+ + "Expected: "+ HdfsConstants.LAYOUT_VERSION
+ " actual "+ nsInfo.getLayoutVersion();
return nsInfo;
}
@@ -814,7 +814,7 @@ void setupBPStorage() throws IOException {
if (simulatedFSDataset) {
initFsDataSet(conf, dataDirs);
bpRegistration.setStorageID(getStorageId()); //same as DN
- bpRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION;
+ bpRegistration.storageInfo.layoutVersion = HdfsConstants.LAYOUT_VERSION;
bpRegistration.storageInfo.namespaceID = bpNSInfo.namespaceID;
bpRegistration.storageInfo.clusterID = bpNSInfo.clusterID;
} else {
@@ -1162,9 +1162,9 @@ void register() throws IOException {
throw new IncorrectVersionException(nsBuildVer, "namenode", stBuildVer);
}
- if (FSConstants.LAYOUT_VERSION != bpNSInfo.getLayoutVersion()) {
+ if (HdfsConstants.LAYOUT_VERSION != bpNSInfo.getLayoutVersion()) {
LOG.warn("Data-node and name-node layout versions must be " +
- "the same. Expected: "+ FSConstants.LAYOUT_VERSION +
+ "the same. Expected: "+ HdfsConstants.LAYOUT_VERSION +
" actual "+ bpNSInfo.getLayoutVersion());
throw new IncorrectVersionException
(bpNSInfo.getLayoutVersion(), "namenode");
@@ -1995,10 +1995,10 @@ public void run() {
sock.setSoTimeout(targets.length * socketTimeout);
long writeTimeout = socketWriteTimeout +
- HdfsConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1);
+ HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1);
OutputStream baseStream = NetUtils.getOutputStream(sock, writeTimeout);
out = new DataOutputStream(new BufferedOutputStream(baseStream,
- FSConstants.SMALL_BUFFER_SIZE));
+ HdfsConstants.SMALL_BUFFER_SIZE));
blockSender = new BlockSender(b, 0, b.getNumBytes(),
false, false, false, DataNode.this);
DatanodeInfo srcNode = new DatanodeInfo(bpReg);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 784ab949ec..488c0188c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -43,15 +43,15 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Daemon;
@@ -137,8 +137,8 @@ synchronized void recoverTransitionRead(DataNode datanode,
// DN storage has been initialized, no need to do anything
return;
}
- assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
- "Data-node version " + FSConstants.LAYOUT_VERSION +
+ assert HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
+ "Data-node version " + HdfsConstants.LAYOUT_VERSION +
" and name-node layout version " + nsInfo.getLayoutVersion() +
" must be the same.";
@@ -268,7 +268,7 @@ static void makeBlockPoolDataDir(Collection<File> dataDirs,
void format(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
sd.clearDirectory(); // create directory
- this.layoutVersion = FSConstants.LAYOUT_VERSION;
+ this.layoutVersion = HdfsConstants.LAYOUT_VERSION;
this.clusterID = nsInfo.getClusterID();
this.namespaceID = nsInfo.getNamespaceID();
this.cTime = 0;
@@ -374,7 +374,7 @@ private void doTransition( DataNode datanode,
}
readProperties(sd);
checkVersionUpgradable(this.layoutVersion);
- assert this.layoutVersion >= FSConstants.LAYOUT_VERSION :
+ assert this.layoutVersion >= HdfsConstants.LAYOUT_VERSION :
"Future version is not allowed";
boolean federationSupported =
@@ -397,7 +397,7 @@ private void doTransition( DataNode datanode,
}
// regular start up
- if (this.layoutVersion == FSConstants.LAYOUT_VERSION
+ if (this.layoutVersion == HdfsConstants.LAYOUT_VERSION
&& this.cTime == nsInfo.getCTime())
return; // regular startup
// verify necessity of a distributed upgrade
@@ -406,7 +406,7 @@ private void doTransition( DataNode datanode,
verifyDistributedUpgradeProgress(um, nsInfo);
// do upgrade
- if (this.layoutVersion > FSConstants.LAYOUT_VERSION
+ if (this.layoutVersion > HdfsConstants.LAYOUT_VERSION
|| this.cTime < nsInfo.getCTime()) {
doUpgrade(sd, nsInfo); // upgrade
return;
@@ -482,7 +482,7 @@ void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
linkAllBlocks(tmpDir, new File(curBpDir, STORAGE_DIR_CURRENT));
// 4. Write version file under /current
- layoutVersion = FSConstants.LAYOUT_VERSION;
+ layoutVersion = HdfsConstants.LAYOUT_VERSION;
clusterID = nsInfo.getClusterID();
writeProperties(sd);
@@ -542,7 +542,7 @@ void doRollback( StorageDirectory sd,
// We allow rollback to a state, which is either consistent with
// the namespace state or can be further upgraded to it.
- if (!(prevInfo.getLayoutVersion() >= FSConstants.LAYOUT_VERSION
+ if (!(prevInfo.getLayoutVersion() >= HdfsConstants.LAYOUT_VERSION
&& prevInfo.getCTime() <= nsInfo.getCTime())) // cannot rollback
throw new InconsistentFSStateException(sd.getRoot(),
"Cannot rollback to a newer state.\nDatanode previous state: LV = "
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index 374d309504..8d7d95f8aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -41,7 +41,7 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
@@ -53,7 +53,7 @@
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.io.IOUtils;
@@ -86,7 +86,7 @@ class DataXceiver extends Receiver implements Runnable {
public DataXceiver(Socket s, DataNode datanode,
DataXceiverServer dataXceiverServer) throws IOException {
super(new DataInputStream(new BufferedInputStream(
- NetUtils.getInputStream(s), FSConstants.SMALL_BUFFER_SIZE)));
+ NetUtils.getInputStream(s), HdfsConstants.SMALL_BUFFER_SIZE)));
this.s = s;
this.isLocal = s.getInetAddress().equals(s.getLocalAddress());
@@ -203,7 +203,7 @@ public void readBlock(final ExtendedBlock block,
OutputStream baseStream = NetUtils.getOutputStream(s,
datanode.socketWriteTimeout);
DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
- baseStream, FSConstants.SMALL_BUFFER_SIZE));
+ baseStream, HdfsConstants.SMALL_BUFFER_SIZE));
checkAccess(out, true, block, blockToken,
Op.READ_BLOCK, BlockTokenSecretManager.AccessMode.READ);
@@ -329,7 +329,7 @@ public void writeBlock(final ExtendedBlock block,
final DataOutputStream replyOut = new DataOutputStream(
new BufferedOutputStream(
NetUtils.getOutputStream(s, datanode.socketWriteTimeout),
- FSConstants.SMALL_BUFFER_SIZE));
+ HdfsConstants.SMALL_BUFFER_SIZE));
checkAccess(replyOut, isClient, block, blockToken,
Op.WRITE_BLOCK, BlockTokenSecretManager.AccessMode.WRITE);
@@ -364,16 +364,16 @@ public void writeBlock(final ExtendedBlock block,
mirrorSock = datanode.newSocket();
try {
int timeoutValue = datanode.socketTimeout
- + (HdfsConstants.READ_TIMEOUT_EXTENSION * targets.length);
+ + (HdfsServerConstants.READ_TIMEOUT_EXTENSION * targets.length);
int writeTimeout = datanode.socketWriteTimeout +
- (HdfsConstants.WRITE_TIMEOUT_EXTENSION * targets.length);
+ (HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * targets.length);
NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
mirrorSock.setSoTimeout(timeoutValue);
- mirrorSock.setSendBufferSize(FSConstants.DEFAULT_DATA_SOCKET_SIZE);
+ mirrorSock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
mirrorOut = new DataOutputStream(
new BufferedOutputStream(
NetUtils.getOutputStream(mirrorSock, writeTimeout),
- FSConstants.SMALL_BUFFER_SIZE));
+ HdfsConstants.SMALL_BUFFER_SIZE));
mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));
new Sender(mirrorOut).writeBlock(originalBlock, blockToken,
@@ -524,7 +524,7 @@ public void blockChecksum(final ExtendedBlock block,
final MetaDataInputStream metadataIn =
datanode.data.getMetaDataInputStream(block);
final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream(
- metadataIn, FSConstants.IO_FILE_BUFFER_SIZE));
+ metadataIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
updateCurrentThreadName("Getting checksum for block " + block);
try {
@@ -603,7 +603,7 @@ public void copyBlock(final ExtendedBlock block,
OutputStream baseStream = NetUtils.getOutputStream(
s, datanode.socketWriteTimeout);
reply = new DataOutputStream(new BufferedOutputStream(
- baseStream, FSConstants.SMALL_BUFFER_SIZE));
+ baseStream, HdfsConstants.SMALL_BUFFER_SIZE));
// send status first
writeResponse(SUCCESS, reply);
@@ -682,14 +682,14 @@ public void replaceBlock(final ExtendedBlock block,
OutputStream baseStream = NetUtils.getOutputStream(proxySock,
datanode.socketWriteTimeout);
proxyOut = new DataOutputStream(new BufferedOutputStream(baseStream,
- FSConstants.SMALL_BUFFER_SIZE));
+ HdfsConstants.SMALL_BUFFER_SIZE));
/* send request to the proxy */
new Sender(proxyOut).copyBlock(block, blockToken);
// receive the response from the proxy
proxyReply = new DataInputStream(new BufferedInputStream(
- NetUtils.getInputStream(proxySock), FSConstants.IO_FILE_BUFFER_SIZE));
+ NetUtils.getInputStream(proxySock), HdfsConstants.IO_FILE_BUFFER_SIZE));
BlockOpResponseProto copyResponse = BlockOpResponseProto.parseFrom(
HdfsProtoUtil.vintPrefixed(proxyReply));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
index 89928a2971..f192747db5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
@@ -30,7 +30,7 @@
import org.apache.commons.logging.Log;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.balancer.Balancer;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
index 8348c8f9c9..5ecdca7b79 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
@@ -53,10 +53,10 @@
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
@@ -465,7 +465,7 @@ private long validateIntegrity(File blockFile, long genStamp) {
}
checksumIn = new DataInputStream(
new BufferedInputStream(new FileInputStream(metaFile),
- FSConstants.IO_FILE_BUFFER_SIZE));
+ HdfsConstants.IO_FILE_BUFFER_SIZE));
// read and handle the common header here. For now just a version
BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
index 76b0bba209..d0fc32c769 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
@@ -20,7 +20,7 @@
import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/Replica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/Replica.java
index d2ab20e914..bd0485394a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/Replica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/Replica.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
/**
* This represents block replicas which are stored in DataNode.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
index 921437df20..d2a6f46c2e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
@@ -20,7 +20,7 @@
import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
/** This class represents replicas being written.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
index d246f6f8dc..447b9337ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
@@ -23,7 +23,7 @@
import java.io.RandomAccessFile;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;
import org.apache.hadoop.io.IOUtils;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
index c2cb5cfc40..972353962c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
@@ -19,7 +19,7 @@
import java.io.File;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
index 86bef1ea38..91045b7ea5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
@@ -20,7 +20,7 @@
import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
index d92b5913da..c6744f9317 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
@@ -27,7 +27,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.http.HttpServer;
import org.mortbay.jetty.nio.SelectChannelConnector;
@@ -71,7 +71,7 @@ public void init(DaemonContext context) throws Exception {
// Obtain secure port for data streaming to datanode
InetSocketAddress socAddr = DataNode.getStreamingAddr(conf);
int socketWriteTimeout = conf.getInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
- HdfsConstants.WRITE_TIMEOUT);
+ HdfsServerConstants.WRITE_TIMEOUT);
ServerSocket ss = (socketWriteTimeout > 0) ?
ServerSocketChannel.open().socket() : new ServerSocket();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java
index 5fc2f2b5d6..478fb5660d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java
@@ -19,8 +19,8 @@
import java.io.IOException;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.UpgradeManager;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
@@ -47,8 +47,8 @@ class UpgradeManagerDatanode extends UpgradeManager {
this.bpid = bpid;
}
- public HdfsConstants.NodeType getType() {
- return HdfsConstants.NodeType.DATA_NODE;
+ public HdfsServerConstants.NodeType getType() {
+ return HdfsServerConstants.NodeType.DATA_NODE;
}
synchronized void initializeUpgrade(NamespaceInfo nsInfo) throws IOException {
@@ -57,7 +57,7 @@ synchronized void initializeUpgrade(NamespaceInfo nsInfo) throws IOException {
DataNode.LOG.info("\n Distributed upgrade for DataNode "
+ dataNode.getMachineName()
+ " version " + getUpgradeVersion() + " to current LV "
- + FSConstants.LAYOUT_VERSION + " is initialized.");
+ + HdfsConstants.LAYOUT_VERSION + " is initialized.");
UpgradeObjectDatanode curUO = (UpgradeObjectDatanode)currentUpgrades.first();
curUO.setDatanode(dataNode, this.bpid);
upgradeState = curUO.preUpgradeAction(nsInfo);
@@ -102,7 +102,7 @@ public synchronized boolean startUpgrade() throws IOException {
if(currentUpgrades == null) {
DataNode.LOG.info("\n Distributed upgrade for DataNode version "
+ getUpgradeVersion() + " to current LV "
- + FSConstants.LAYOUT_VERSION + " cannot be started. "
+ + HdfsConstants.LAYOUT_VERSION + " cannot be started. "
+ "The upgrade object is not defined.");
return false;
}
@@ -115,7 +115,7 @@ public synchronized boolean startUpgrade() throws IOException {
DataNode.LOG.info("\n Distributed upgrade for DataNode "
+ dataNode.getMachineName()
+ " version " + getUpgradeVersion() + " to current LV "
- + FSConstants.LAYOUT_VERSION + " is started.");
+ + HdfsConstants.LAYOUT_VERSION + " is started.");
return true;
}
@@ -130,7 +130,7 @@ synchronized void processUpgradeCommand(UpgradeCommand command
throw new IOException(
"Distributed upgrade for DataNode " + dataNode.getMachineName()
+ " version " + getUpgradeVersion() + " to current LV "
- + FSConstants.LAYOUT_VERSION + " cannot be started. "
+ + HdfsConstants.LAYOUT_VERSION + " cannot be started. "
+ "The upgrade object is not defined.");
}
@@ -145,7 +145,7 @@ public synchronized void completeUpgrade() throws IOException {
DataNode.LOG.info("\n Distributed upgrade for DataNode "
+ dataNode.getMachineName()
+ " version " + getUpgradeVersion() + " to current LV "
- + FSConstants.LAYOUT_VERSION + " is complete.");
+ + HdfsConstants.LAYOUT_VERSION + " is complete.");
}
synchronized void shutdownUpgrade() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java
index 9e51f230f9..ddb1d6029f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.UpgradeObject;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
@@ -36,8 +36,8 @@ public abstract class UpgradeObjectDatanode extends UpgradeObject implements Run
private DataNode dataNode = null;
private String bpid = null;
- public HdfsConstants.NodeType getType() {
- return HdfsConstants.NodeType.DATA_NODE;
+ public HdfsServerConstants.NodeType getType() {
+ return HdfsServerConstants.NodeType.DATA_NODE;
}
protected DataNode getDatanode() {
@@ -118,7 +118,7 @@ public void run() {
if(getUpgradeStatus() < 100) {
DataNode.LOG.info("\n Distributed upgrade for DataNode version "
+ getVersion() + " to current LV "
- + FSConstants.LAYOUT_VERSION + " cannot be completed.");
+ + HdfsConstants.LAYOUT_VERSION + " cannot be completed.");
}
// Complete the upgrade by calling the manager method
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
index d72509cee2..dd68261253 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
@@ -20,20 +20,21 @@
import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.IOException;
+import java.util.Collection;
import java.util.Iterator;
+import java.util.List;
import java.util.zip.Checksum;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.LogLoadPlan;
-import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.StringUtils;
import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
/**
* Extension of FSImage for the backup node.
@@ -81,6 +82,8 @@ static enum BNState {
* {@see #freezeNamespaceAtNextRoll()}
*/
private boolean stopApplyingEditsOnNextRoll = false;
+
+ private FSNamesystem namesystem;
/**
* Construct a backup image.
@@ -92,6 +95,10 @@ static enum BNState {
storage.setDisablePreUpgradableLayoutCheck(true);
bnState = BNState.DROP_UNTIL_NEXT_ROLL;
}
+
+ void setNamesystem(FSNamesystem fsn) {
+ this.namesystem = fsn;
+ }
/**
* Analyze backup storage directories for consistency.
@@ -106,7 +113,7 @@ void recoverCreateRead() throws IOException {
StorageDirectory sd = it.next();
StorageState curState;
try {
- curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR, storage);
+ curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage);
// sd is locked but not opened
switch(curState) {
case NON_EXISTENT:
@@ -140,7 +147,7 @@ void recoverCreateRead() throws IOException {
* and create empty edits.
*/
void saveCheckpoint() throws IOException {
- saveNamespace();
+ saveNamespace(namesystem);
}
/**
@@ -223,7 +230,7 @@ private synchronized void applyEdits(long firstTxId, int numTxns, byte[] data)
}
lastAppliedTxId += numTxns;
- getFSNamesystem().dir.updateCountForINodeWithQuota(); // inefficient!
+ namesystem.dir.updateCountForINodeWithQuota(); // inefficient!
} finally {
backupInputStream.clear();
}
@@ -261,11 +268,18 @@ private boolean tryConvergeJournalSpool() throws IOException {
new FSImageTransactionalStorageInspector();
storage.inspectStorageDirs(inspector);
- LogLoadPlan logLoadPlan = inspector.createLogLoadPlan(lastAppliedTxId,
- target - 1);
-
- logLoadPlan.doRecovery();
- loadEdits(logLoadPlan.getEditsFiles());
+
+ editLog.recoverUnclosedStreams();
+ Iterable<EditLogInputStream> editStreamsAll
+ = editLog.selectInputStreams(lastAppliedTxId, target - 1);
+ // remove inprogress
+ List<EditLogInputStream> editStreams = Lists.newArrayList();
+ for (EditLogInputStream s : editStreamsAll) {
+ if (s.getFirstTxId() != editLog.getCurSegmentTxId()) {
+ editStreams.add(s);
+ }
+ }
+ loadEdits(editStreams, namesystem);
}
// now, need to load the in-progress file
@@ -275,7 +289,24 @@ private boolean tryConvergeJournalSpool() throws IOException {
return false; // drop lock and try again to load local logs
}
- EditLogInputStream stream = getEditLog().getInProgressFileInputStream();
+ EditLogInputStream stream = null;
+ Collection<EditLogInputStream> editStreams
+ = getEditLog().selectInputStreams(
+ getEditLog().getCurSegmentTxId(),
+ getEditLog().getCurSegmentTxId());
+
+ for (EditLogInputStream s : editStreams) {
+ if (s.getFirstTxId() == getEditLog().getCurSegmentTxId()) {
+ stream = s;
+ }
+ break;
+ }
+ if (stream == null) {
+ LOG.warn("Unable to find stream starting with " + editLog.getCurSegmentTxId()
+ + ". This indicates that there is an error in synchronization in BackupImage");
+ return false;
+ }
+
try {
long remainingTxns = getEditLog().getLastWrittenTxId() - lastAppliedTxId;
@@ -289,7 +320,7 @@ private boolean tryConvergeJournalSpool() throws IOException {
"expected to load " + remainingTxns + " but loaded " +
numLoaded + " from " + stream;
} finally {
- IOUtils.closeStream(stream);
+ FSEditLog.closeAllStreams(editStreams);
}
LOG.info("Successfully synced BackupNode with NameNode at txnid " +
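
The hunks above replace BackupImage's old LogLoadPlan with a direct selection of edit log input streams, dropping the segment the active NameNode is still writing. A rough, self-contained sketch of that filtering step follows; Segment and dropInProgress are stand-ins invented here, while the real code works on EditLogInputStream and FSEditLog.getCurSegmentTxId().

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Stand-in for EditLogInputStream: only the first txid matters for the filter.
class Segment {
  final long firstTxId;
  Segment(long firstTxId) { this.firstTxId = firstTxId; }
}

public class DropInProgressExample {
  // Keep only finalized segments; the one starting at the currently open
  // (in-progress) txid is skipped, mirroring the "remove inprogress" loop
  // in tryConvergeJournalSpool() above.
  static List<Segment> dropInProgress(Iterable<Segment> all, long curSegmentTxId) {
    List<Segment> finalized = new ArrayList<Segment>();
    for (Segment s : all) {
      if (s.firstTxId != curSegmentTxId) {
        finalized.add(s);
      }
    }
    return finalized;
  }

  public static void main(String[] args) {
    List<Segment> all = Arrays.asList(new Segment(1), new Segment(101), new Segment(201));
    // Assume txid 201 is the segment the active NameNode is still writing.
    System.out.println(dropInProgress(all, 201).size()); // prints 2
  }
}
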
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
index 3cac6676f1..6976620341 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
@@ -58,12 +58,31 @@ public void purgeLogsOlderThan(long minTxIdToKeep)
throws IOException {
}
+ @Override
+ public long getNumberOfTransactions(long fromTxnId)
+ throws IOException, CorruptionException {
+ // This JournalManager is never used for input. Therefore it cannot
+ // return any transactions
+ return 0;
+ }
+
+ @Override
+ public EditLogInputStream getInputStream(long fromTxnId) throws IOException {
+ // This JournalManager is never used for input. Therefore it cannot
+ // return any transactions
+ throw new IOException("Unsupported operation");
+ }
+
+ @Override
+ public void recoverUnfinalizedSegments() throws IOException {
+ }
+
public boolean matchesRegistration(NamenodeRegistration bnReg) {
return bnReg.getAddress().equals(this.bnReg.getAddress());
}
@Override
- public EditLogInputStream getInProgressInputStream(long segmentStartsAtTxId) {
- return null;
+ public String toString() {
+ return "BackupJournalManager";
}
}
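
BackupJournalManager now implements the read-side JournalManager methods purely defensively, since it only ever receives edits over RPC. A minimal sketch of the same write-only contract; ReadableJournal is a hypothetical, trimmed-down version of that interface, defined only for this example.

import java.io.IOException;

// Hypothetical read-side contract of a journal manager.
interface ReadableJournal {
  long getNumberOfTransactions(long fromTxnId) throws IOException;
  Object getInputStream(long fromTxnId) throws IOException;
}

// Write-only journal: reports no readable transactions and refuses to open
// an input stream, just as BackupJournalManager does above.
class WriteOnlyJournal implements ReadableJournal {
  @Override
  public long getNumberOfTransactions(long fromTxnId) {
    return 0; // never used for input
  }
  @Override
  public Object getInputStream(long fromTxnId) throws IOException {
    throw new IOException("Unsupported operation");
  }
}
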
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
index 25667b65a2..d8f68a0aaa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
@@ -26,8 +26,8 @@
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
@@ -52,7 +52,7 @@
*
*/
@InterfaceAudience.Private
-public class BackupNode extends NameNode implements JournalProtocol {
+public class BackupNode extends NameNode {
private static final String BN_ADDRESS_NAME_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
private static final String BN_ADDRESS_DEFAULT = DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_DEFAULT;
private static final String BN_HTTP_ADDRESS_NAME_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY;
@@ -95,18 +95,20 @@ protected InetSocketAddress getServiceRpcServerAddress(Configuration conf) throw
}
@Override // NameNode
- protected void setRpcServerAddress(Configuration conf) {
- conf.set(BN_ADDRESS_NAME_KEY, getHostPortString(rpcAddress));
+ protected void setRpcServerAddress(Configuration conf,
+ InetSocketAddress addr) {
+ conf.set(BN_ADDRESS_NAME_KEY, getHostPortString(addr));
}
@Override // Namenode
- protected void setRpcServiceServerAddress(Configuration conf) {
- conf.set(BN_SERVICE_RPC_ADDRESS_KEY, getHostPortString(serviceRPCAddress));
+ protected void setRpcServiceServerAddress(Configuration conf,
+ InetSocketAddress addr) {
+ conf.set(BN_SERVICE_RPC_ADDRESS_KEY, getHostPortString(addr));
}
@Override // NameNode
protected InetSocketAddress getHttpServerAddress(Configuration conf) {
- assert rpcAddress != null : "rpcAddress should be calculated first";
+ assert getNameNodeAddress() != null : "rpcAddress should be calculated first";
String addr = conf.get(BN_HTTP_ADDRESS_NAME_KEY, BN_HTTP_ADDRESS_DEFAULT);
return NetUtils.createSocketAddr(addr);
}
@@ -120,6 +122,7 @@ protected void setHttpServerAddress(Configuration conf){
protected void loadNamesystem(Configuration conf) throws IOException {
BackupImage bnImage = new BackupImage(conf);
this.namesystem = new FSNamesystem(conf, bnImage);
+ bnImage.setNamesystem(namesystem);
bnImage.recoverCreateRead();
}
@@ -134,7 +137,7 @@ protected void initialize(Configuration conf) throws IOException {
// Backup node should never do lease recovery,
// therefore lease hard limit should never expire.
namesystem.leaseManager.setLeasePeriod(
- FSConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE);
+ HdfsConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE);
clusterId = nsInfo.getClusterID();
blockPoolId = nsInfo.getBlockPoolID();
@@ -145,6 +148,12 @@ protected void initialize(Configuration conf) throws IOException {
runCheckpointDaemon(conf);
}
+ @Override
+ protected NameNodeRpcServer createRpcServer(Configuration conf)
+ throws IOException {
+ return new BackupNodeRpcServer(conf, this);
+ }
+
@Override // NameNode
public void stop() {
if(checkpointManager != null) {
@@ -177,48 +186,58 @@ public void stop() {
super.stop();
}
-
- @Override
- public long getProtocolVersion(String protocol, long clientVersion)
- throws IOException {
- if (protocol.equals(JournalProtocol.class.getName())) {
- return JournalProtocol.versionID;
- } else {
- return super.getProtocolVersion(protocol, clientVersion);
+ static class BackupNodeRpcServer extends NameNodeRpcServer implements JournalProtocol {
+ private final String nnRpcAddress;
+
+ private BackupNodeRpcServer(Configuration conf, BackupNode nn)
+ throws IOException {
+ super(conf, nn);
+ this.server.addProtocol(JournalProtocol.class, this);
+ nnRpcAddress = nn.nnRpcAddress;
+ }
+
+ @Override
+ public long getProtocolVersion(String protocol, long clientVersion)
+ throws IOException {
+ if (protocol.equals(JournalProtocol.class.getName())) {
+ return JournalProtocol.versionID;
+ } else {
+ return super.getProtocolVersion(protocol, clientVersion);
+ }
+ }
+
+ /////////////////////////////////////////////////////
+ // BackupNodeProtocol implementation for backup node.
+ /////////////////////////////////////////////////////
+ @Override
+ public void startLogSegment(NamenodeRegistration registration, long txid)
+ throws IOException {
+ nn.checkOperation(OperationCategory.JOURNAL);
+ verifyRequest(registration);
+
+ getBNImage().namenodeStartedLogSegment(txid);
+ }
+
+ @Override
+ public void journal(NamenodeRegistration nnReg,
+ long firstTxId, int numTxns,
+ byte[] records) throws IOException {
+ nn.checkOperation(OperationCategory.JOURNAL);
+ verifyRequest(nnReg);
+ if(!nnRpcAddress.equals(nnReg.getAddress()))
+ throw new IOException("Journal request from unexpected name-node: "
+ + nnReg.getAddress() + " expecting " + nnRpcAddress);
+ getBNImage().journal(firstTxId, numTxns, records);
+ }
+
+ private BackupImage getBNImage() {
+ return (BackupImage)nn.getFSImage();
}
}
-
- /////////////////////////////////////////////////////
- // BackupNodeProtocol implementation for backup node.
- /////////////////////////////////////////////////////
-
- @Override
- public void journal(NamenodeRegistration nnReg,
- long firstTxId, int numTxns,
- byte[] records) throws IOException {
- checkOperation(OperationCategory.JOURNAL);
- verifyRequest(nnReg);
- if(!nnRpcAddress.equals(nnReg.getAddress()))
- throw new IOException("Journal request from unexpected name-node: "
- + nnReg.getAddress() + " expecting " + nnRpcAddress);
- getBNImage().journal(firstTxId, numTxns, records);
- }
-
- @Override
- public void startLogSegment(NamenodeRegistration registration, long txid)
- throws IOException {
- checkOperation(OperationCategory.JOURNAL);
- verifyRequest(registration);
- getBNImage().namenodeStartedLogSegment(txid);
- }
-
//////////////////////////////////////////////////////
-
- BackupImage getBNImage() {
- return (BackupImage)getFSImage();
- }
boolean shouldCheckpointAtStartup() {
FSImage fsImage = getFSImage();
@@ -330,9 +349,9 @@ private static NamespaceInfo handshake(NamenodeProtocol namenode)
LOG.fatal(errorMsg);
throw new IOException(errorMsg);
}
- assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
+ assert HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
"Active and backup node layout versions must be the same. Expected: "
- + FSConstants.LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion();
+ + HdfsConstants.LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion();
return nsInfo;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CancelDelegationTokenServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CancelDelegationTokenServlet.java
index e4de6345b6..2a41aeeb9b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CancelDelegationTokenServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CancelDelegationTokenServlet.java
@@ -69,7 +69,7 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res
try {
ugi.doAs(new PrivilegedExceptionAction<Void>() {
public Void run() throws Exception {
- nn.cancelDelegationToken(token);
+ nn.getRpcServer().cancelDelegationToken(token);
return null;
}
});
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
index f75410031d..5e544c6695 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
@@ -30,7 +30,7 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
@@ -224,7 +224,7 @@ void doCheckpoint() throws IOException {
LOG.info("Loading image with txid " + sig.mostRecentCheckpointTxId);
File file = bnStorage.findImageFile(sig.mostRecentCheckpointTxId);
- bnImage.reloadFromImageFile(file);
+ bnImage.reloadFromImageFile(file, backupNode.getNamesystem());
}
lastApplied = bnImage.getLastAppliedTxId();
@@ -238,11 +238,11 @@ void doCheckpoint() throws IOException {
backupNode.nnHttpAddress, log, bnStorage);
}
- rollForwardByApplyingLogs(manifest, bnImage);
+ rollForwardByApplyingLogs(manifest, bnImage, backupNode.getNamesystem());
}
long txid = bnImage.getLastAppliedTxId();
- bnImage.saveFSImageInAllDirs(txid);
+ bnImage.saveFSImageInAllDirs(backupNode.getNamesystem(), txid);
bnStorage.writeAll();
if(cpCmd.needToReturnImage()) {
@@ -272,19 +272,21 @@ private InetSocketAddress getImageListenAddress() {
static void rollForwardByApplyingLogs(
RemoteEditLogManifest manifest,
- FSImage dstImage) throws IOException {
+ FSImage dstImage,
+ FSNamesystem dstNamesystem) throws IOException {
NNStorage dstStorage = dstImage.getStorage();
- List<File> editsFiles = Lists.newArrayList();
+ List<EditLogInputStream> editsStreams = Lists.newArrayList();
for (RemoteEditLog log : manifest.getLogs()) {
File f = dstStorage.findFinalizedEditsFile(
log.getStartTxId(), log.getEndTxId());
if (log.getStartTxId() > dstImage.getLastAppliedTxId()) {
- editsFiles.add(f);
- }
+ editsStreams.add(new EditLogFileInputStream(f, log.getStartTxId(),
+ log.getEndTxId()));
+ }
}
LOG.info("Checkpointer about to load edits from " +
- editsFiles.size() + " file(s).");
- dstImage.loadEdits(editsFiles);
+ editsStreams.size() + " stream(s).");
+ dstImage.loadEdits(editsStreams, dstNamesystem);
}
}
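
rollForwardByApplyingLogs() now turns each finalized remote log that lies beyond the image's last applied txid into an EditLogFileInputStream. The selection rule itself is small; here is a runnable sketch of it, using a hypothetical RemoteLog in place of RemoteEditLog.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Stand-in for RemoteEditLog: a finalized segment covering [startTxId, endTxId].
class RemoteLog {
  final long startTxId;
  final long endTxId;
  RemoteLog(long startTxId, long endTxId) {
    this.startTxId = startTxId;
    this.endTxId = endTxId;
  }
}

public class RollForwardSelection {
  // Mirrors the loop in rollForwardByApplyingLogs() above: a log is replayed
  // only if it starts after the image's last applied txid.
  static List<RemoteLog> logsToReplay(List<RemoteLog> manifest, long lastAppliedTxId) {
    List<RemoteLog> selected = new ArrayList<RemoteLog>();
    for (RemoteLog log : manifest) {
      if (log.startTxId > lastAppliedTxId) {
        selected.add(log);
      }
    }
    return selected;
  }

  public static void main(String[] args) {
    List<RemoteLog> manifest = Arrays.asList(
        new RemoteLog(1, 100), new RemoteLog(101, 200), new RemoteLog(201, 300));
    // Hypothetical run where the image already covers txids up to 150.
    System.out.println(logsToReplay(manifest, 150).size()); // prints 1 (the 201-300 segment)
  }
}
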
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
index ea0f392a3d..1c8253f665 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
@@ -73,7 +73,7 @@ protected ClientProtocol createNameNodeProxy() throws IOException {
// rpc
NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
if (nn != null) {
- return nn;
+ return nn.getRpcServer();
}
InetSocketAddress nnAddr =
NameNodeHttpServer.getNameNodeAddressFromContext(context);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java
index 8921bc0c55..974697d927 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java
@@ -21,6 +21,7 @@
import java.io.ByteArrayInputStream;
import java.io.IOException;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import com.google.common.base.Preconditions;
/**
@@ -122,4 +123,14 @@ void clear() throws IOException {
reader = null;
this.version = 0;
}
+
+ @Override
+ public long getFirstTxId() throws IOException {
+ return HdfsConstants.INVALID_TXID;
+ }
+
+ @Override
+ public long getLastTxId() throws IOException {
+ return HdfsConstants.INVALID_TXID;
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
index 532b2f2dcf..9db7f8ae66 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
@@ -24,10 +24,11 @@
import java.io.BufferedInputStream;
import java.io.EOFException;
import java.io.DataInputStream;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
import org.apache.hadoop.io.IOUtils;
import com.google.common.annotations.VisibleForTesting;
@@ -38,12 +39,15 @@
class EditLogFileInputStream extends EditLogInputStream {
private final File file;
private final FileInputStream fStream;
+ final private long firstTxId;
+ final private long lastTxId;
private final int logVersion;
private final FSEditLogOp.Reader reader;
private final FSEditLogLoader.PositionTrackingInputStream tracker;
/**
* Open an EditLogInputStream for the given file.
+ * The file is pre-transactional, so it has no txids.
* @param name filename to open
* @throws LogHeaderCorruptException if the header is either missing or
* appears to be corrupt/truncated
@@ -52,6 +56,21 @@ class EditLogFileInputStream extends EditLogInputStream {
*/
EditLogFileInputStream(File name)
throws LogHeaderCorruptException, IOException {
+ this(name, HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID);
+ }
+
+ /**
+ * Open an EditLogInputStream for the given file.
+ * @param name filename to open
+ * @param firstTxId first transaction found in file
+ * @param lastTxId last transaction id found in file
+ * @throws LogHeaderCorruptException if the header is either missing or
+ * appears to be corrupt/truncated
+ * @throws IOException if an actual IO error occurs while reading the
+ * header
+ */
+ EditLogFileInputStream(File name, long firstTxId, long lastTxId)
+ throws LogHeaderCorruptException, IOException {
file = name;
fStream = new FileInputStream(name);
@@ -66,6 +85,18 @@ class EditLogFileInputStream extends EditLogInputStream {
}
reader = new FSEditLogOp.Reader(in, logVersion);
+ this.firstTxId = firstTxId;
+ this.lastTxId = lastTxId;
+ }
+
+ @Override
+ public long getFirstTxId() throws IOException {
+ return firstTxId;
+ }
+
+ @Override
+ public long getLastTxId() throws IOException {
+ return lastTxId;
}
@Override // JournalStream
@@ -117,7 +148,8 @@ static FSEditLogLoader.EditLogValidation validateEditLog(File file) throws IOExc
// If it's missing its header, this is equivalent to no transactions
FSImage.LOG.warn("Log at " + file + " has no valid header",
corrupt);
- return new FSEditLogLoader.EditLogValidation(0, 0);
+ return new FSEditLogLoader.EditLogValidation(0, HdfsConstants.INVALID_TXID,
+ HdfsConstants.INVALID_TXID);
}
try {
@@ -143,11 +175,11 @@ static int readLogVersion(DataInputStream in)
throw new LogHeaderCorruptException(
"Reached EOF when reading log header");
}
- if (logVersion < FSConstants.LAYOUT_VERSION) { // future version
+ if (logVersion < HdfsConstants.LAYOUT_VERSION) { // future version
throw new LogHeaderCorruptException(
"Unexpected version of the file system log file: "
+ logVersion + ". Current version = "
- + FSConstants.LAYOUT_VERSION + ".");
+ + HdfsConstants.LAYOUT_VERSION + ".");
}
assert logVersion <= Storage.LAST_UPGRADABLE_LAYOUT_VERSION :
"Unsupported version " + logVersion;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
index f79f44266e..be75f637a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
@@ -27,7 +27,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.io.IOUtils;
import com.google.common.annotations.VisibleForTesting;
@@ -109,7 +109,7 @@ void writeRaw(byte[] bytes, int offset, int length) throws IOException {
void create() throws IOException {
fc.truncate(0);
fc.position(0);
- doubleBuf.getCurrentBuf().writeInt(FSConstants.LAYOUT_VERSION);
+ doubleBuf.getCurrentBuf().writeInt(HdfsConstants.LAYOUT_VERSION);
setReadyToFlush();
flush();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java
index 52a3dd4c20..c6f850542f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java
@@ -28,6 +28,17 @@
* into the #{@link EditLogOutputStream}.
*/
abstract class EditLogInputStream implements JournalStream, Closeable {
+ /**
+ * @return the first transaction which will be found in this stream
+ */
+ public abstract long getFirstTxId() throws IOException;
+
+ /**
+ * @return the last transaction which will be found in this stream
+ */
+ public abstract long getLastTxId() throws IOException;
+
+
/**
* Close the stream.
* @throws IOException if an error occurred while closing
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java
index 0dd90588f4..5312b145ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java
@@ -20,7 +20,7 @@
import java.io.IOException;
import java.io.OutputStream;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.Writer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IOUtils;
@@ -129,7 +129,7 @@ public TxnBuffer(int initBufferSize) {
}
public void writeOp(FSEditLogOp op) throws IOException {
- if (firstTxId == FSConstants.INVALID_TXID) {
+ if (firstTxId == HdfsConstants.INVALID_TXID) {
firstTxId = op.txid;
} else {
assert op.txid > firstTxId;
@@ -141,7 +141,7 @@ public void writeOp(FSEditLogOp op) throws IOException {
@Override
public DataOutputBuffer reset() {
super.reset();
- firstTxId = FSConstants.INVALID_TXID;
+ firstTxId = HdfsConstants.INVALID_TXID;
numTxns = 0;
return this;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 4ad7c7e451..4d7f2b9ca6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -43,7 +43,7 @@
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.FSLimitException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
@@ -55,10 +55,11 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.util.ByteArray;
+import com.google.common.base.Preconditions;
+
/*************************************************
* FSDirectory stores the filesystem directory state.
* It handles writing/loading values to disk, and logging
@@ -72,6 +73,7 @@ public class FSDirectory implements Closeable {
INodeDirectoryWithQuota rootDir;
FSImage fsImage;
+ private final FSNamesystem namesystem;
private volatile boolean ready = false;
private static final long UNKNOWN_DISK_SPACE = -1;
private final int maxComponentLength;
@@ -113,15 +115,9 @@ boolean hasReadLock() {
*/
private final NameCache nameCache;
- /** Access an existing dfs name directory. */
- FSDirectory(FSNamesystem ns, Configuration conf) throws IOException {
- this(new FSImage(conf), ns, conf);
- }
-
FSDirectory(FSImage fsImage, FSNamesystem ns, Configuration conf) {
this.dirLock = new ReentrantReadWriteLock(true); // fair
this.cond = dirLock.writeLock().newCondition();
- fsImage.setFSNamesystem(ns);
rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
ns.createFsOwnerPermissions(new FsPermission((short)0755)),
Integer.MAX_VALUE, UNKNOWN_DISK_SPACE);
@@ -145,10 +141,11 @@ boolean hasReadLock() {
NameNode.LOG.info("Caching file names occuring more than " + threshold
+ " times ");
nameCache = new NameCache(threshold);
+ namesystem = ns;
}
private FSNamesystem getFSNamesystem() {
- return fsImage.getFSNamesystem();
+ return namesystem;
}
private BlockManager getBlockManager() {
@@ -156,33 +153,11 @@ private BlockManager getBlockManager() {
}
/**
- * Load the filesystem image into memory.
- *
- * @param startOpt Startup type as specified by the user.
- * @throws IOException If image or editlog cannot be read.
+ * Notify that loading of this FSDirectory is complete, and
+ * it is ready for use
*/
- void loadFSImage(StartupOption startOpt)
- throws IOException {
- // format before starting up if requested
- if (startOpt == StartupOption.FORMAT) {
- fsImage.format(fsImage.getStorage().determineClusterId());// reuse current id
-
- startOpt = StartupOption.REGULAR;
- }
- boolean success = false;
- try {
- if (fsImage.recoverTransitionRead(startOpt)) {
- fsImage.saveNamespace();
- }
- fsImage.openEditLog();
-
- fsImage.setCheckpointDirectories(null, null);
- success = true;
- } finally {
- if (!success) {
- fsImage.close();
- }
- }
+ void imageLoadComplete() {
+ Preconditions.checkState(!ready, "FSDirectory already loaded");
writeLock();
try {
setReady(true);
@@ -1876,10 +1851,10 @@ INodeDirectory unprotectedSetQuota(String src, long nsQuota, long dsQuota)
UnresolvedLinkException {
assert hasWriteLock();
// sanity check
- if ((nsQuota < 0 && nsQuota != FSConstants.QUOTA_DONT_SET &&
- nsQuota < FSConstants.QUOTA_RESET) ||
- (dsQuota < 0 && dsQuota != FSConstants.QUOTA_DONT_SET &&
- dsQuota < FSConstants.QUOTA_RESET)) {
+ if ((nsQuota < 0 && nsQuota != HdfsConstants.QUOTA_DONT_SET &&
+ nsQuota < HdfsConstants.QUOTA_RESET) ||
+ (dsQuota < 0 && dsQuota != HdfsConstants.QUOTA_DONT_SET &&
+ dsQuota < HdfsConstants.QUOTA_RESET)) {
throw new IllegalArgumentException("Illegal value for nsQuota or " +
"dsQuota : " + nsQuota + " and " +
dsQuota);
@@ -1893,16 +1868,16 @@ INodeDirectory unprotectedSetQuota(String src, long nsQuota, long dsQuota)
throw new FileNotFoundException("Directory does not exist: " + srcs);
} else if (!targetNode.isDirectory()) {
throw new FileNotFoundException("Cannot set quota on a file: " + srcs);
- } else if (targetNode.isRoot() && nsQuota == FSConstants.QUOTA_RESET) {
+ } else if (targetNode.isRoot() && nsQuota == HdfsConstants.QUOTA_RESET) {
throw new IllegalArgumentException("Cannot clear namespace quota on root.");
} else { // a directory inode
INodeDirectory dirNode = (INodeDirectory)targetNode;
long oldNsQuota = dirNode.getNsQuota();
long oldDsQuota = dirNode.getDsQuota();
- if (nsQuota == FSConstants.QUOTA_DONT_SET) {
+ if (nsQuota == HdfsConstants.QUOTA_DONT_SET) {
nsQuota = oldNsQuota;
}
- if (dsQuota == FSConstants.QUOTA_DONT_SET) {
+ if (dsQuota == HdfsConstants.QUOTA_DONT_SET) {
dsQuota = oldDsQuota;
}
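
The quota hunks above only rename the constants, but the sentinel convention they encode is easy to miss: QUOTA_DONT_SET means "keep the current value" and QUOTA_RESET means "clear the quota". A toy sketch of that resolution step; the sentinel values below are assumptions made for illustration, the real ones live in HdfsConstants.

// Sketch of the QUOTA_DONT_SET / QUOTA_RESET convention used in
// unprotectedSetQuota(). Values are illustrative only.
public class QuotaUpdate {
  static final long QUOTA_DONT_SET = Long.MAX_VALUE; // assumed sentinel
  static final long QUOTA_RESET = -1;                // assumed sentinel

  // A "don't set" request leaves the existing quota untouched.
  static long resolveQuota(long requested, long current) {
    return requested == QUOTA_DONT_SET ? current : requested;
  }

  public static void main(String[] args) {
    long oldNsQuota = 1000;
    System.out.println(resolveQuota(QUOTA_DONT_SET, oldNsQuota)); // 1000 (unchanged)
    System.out.println(resolveQuota(500, oldNsQuota));            // 500  (new quota)
    System.out.println(resolveQuota(QUOTA_RESET, oldNsQuota));    // -1   (clear quota)
  }
}
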
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 495c42e45a..e355a9d838 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
+import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
@@ -29,17 +30,19 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import static org.apache.hadoop.hdfs.server.common.Util.now;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.hdfs.server.namenode.JournalManager.CorruptionException;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.security.token.delegation.DelegationKey;
+import org.apache.hadoop.io.IOUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@@ -91,7 +94,7 @@ private enum State {
// the first txid of the log that's currently open for writing.
// If this value is N, we are currently writing to edits_inprogress_N
- private long curSegmentTxId = FSConstants.INVALID_TXID;
+ private long curSegmentTxId = HdfsConstants.INVALID_TXID;
// the time of printing the statistics to the log file.
private long lastPrintTime;
@@ -904,7 +907,7 @@ public void purgeLogsOlderThan(final long minTxIdToKeep) {
// synchronized to prevent findbugs warning about inconsistent
// synchronization. This will be JIT-ed out if asserts are
// off.
- assert curSegmentTxId == FSConstants.INVALID_TXID || // on format this is no-op
+ assert curSegmentTxId == HdfsConstants.INVALID_TXID || // on format this is no-op
minTxIdToKeep <= curSegmentTxId :
"cannot purge logs older than txid " + minTxIdToKeep +
" when current segment starts at " + curSegmentTxId;
@@ -1068,6 +1071,112 @@ private void disableAndReportErrorOnJournals(List badJournals)
}
}
+ /**
+ * Find the best edit log input stream to read from, starting at fromTxId.
+ * Here "best" means the edit log with the largest contiguous range of
+ * transactions starting at fromTxId.
+ *
+ * If a journal throws a CorruptionException while reading from a txn id,
+ * it means that it has more transactions but cannot find any starting from
+ * fromTxId. If this is the case and no other journal has transactions, we
+ * should throw an exception, as it means more transactions exist but we
+ * cannot load them.
+ *
+ * @param fromTxId Transaction id to start from.
+ * @return an edit log input stream with transactions starting from fromTxId,
+ * or null if no more exist
+ */
+ private EditLogInputStream selectStream(long fromTxId)
+ throws IOException {
+ JournalManager bestjm = null;
+ long bestjmNumTxns = 0;
+ CorruptionException corruption = null;
+
+ for (JournalAndStream jas : journals) {
+ JournalManager candidate = jas.getManager();
+ long candidateNumTxns = 0;
+ try {
+ candidateNumTxns = candidate.getNumberOfTransactions(fromTxId);
+ } catch (CorruptionException ce) {
+ corruption = ce;
+ } catch (IOException ioe) {
+ LOG.warn("Error reading number of transactions from " + candidate);
+ continue; // error reading disk, just skip
+ }
+
+ if (candidateNumTxns > bestjmNumTxns) {
+ bestjm = candidate;
+ bestjmNumTxns = candidateNumTxns;
+ }
+ }
+
+
+ if (bestjm == null) {
+ /**
+ * If all candidates either threw a CorruptionException or
+ * found 0 transactions, then a gap exists.
+ */
+ if (corruption != null) {
+ throw new IOException("Gap exists in logs from "
+ + fromTxId, corruption);
+ } else {
+ return null;
+ }
+ }
+
+ return bestjm.getInputStream(fromTxId);
+ }
+
+ /**
+ * Run recovery on all journals to recover any unclosed segments
+ */
+ void recoverUnclosedStreams() {
+ mapJournalsAndReportErrors(new JournalClosure() {
+ @Override
+ public void apply(JournalAndStream jas) throws IOException {
+ jas.manager.recoverUnfinalizedSegments();
+ }
+ }, "recovering unclosed streams");
+ }
+
+ /**
+ * Select a list of input streams to load.
+ * @param fromTxId first transaction in the selected streams
+ * @param toAtLeastTxId the selected streams must contain this transaction
+ */
+ Collection<EditLogInputStream> selectInputStreams(long fromTxId, long toAtLeastTxId)
+ throws IOException {
+ List<EditLogInputStream> streams = Lists.newArrayList();
+
+ boolean gapFound = false;
+ EditLogInputStream stream = selectStream(fromTxId);
+ while (stream != null) {
+ fromTxId = stream.getLastTxId() + 1;
+ streams.add(stream);
+ try {
+ stream = selectStream(fromTxId);
+ } catch (IOException ioe) {
+ gapFound = true;
+ break;
+ }
+ }
+ if (fromTxId <= toAtLeastTxId || gapFound) {
+ closeAllStreams(streams);
+ throw new IOException("No non-corrupt logs for txid "
+ + fromTxId);
+ }
+ return streams;
+ }
+
+ /**
+ * Close all the streams in a collection
+ * @param streams The list of streams to close
+ */
+ static void closeAllStreams(Iterable<EditLogInputStream> streams) {
+ for (EditLogInputStream s : streams) {
+ IOUtils.closeStream(s);
+ }
+ }
+
/**
* Container for a JournalManager paired with its currently
* active stream.
@@ -1078,7 +1187,7 @@ private void disableAndReportErrorOnJournals(List badJournals)
static class JournalAndStream {
private final JournalManager manager;
private EditLogOutputStream stream;
- private long segmentStartsAtTxId = FSConstants.INVALID_TXID;
+ private long segmentStartsAtTxId = HdfsConstants.INVALID_TXID;
private JournalAndStream(JournalManager manager) {
this.manager = manager;
@@ -1110,7 +1219,7 @@ void abort() {
LOG.error("Unable to abort stream " + stream, ioe);
}
stream = null;
- segmentStartsAtTxId = FSConstants.INVALID_TXID;
+ segmentStartsAtTxId = HdfsConstants.INVALID_TXID;
}
private boolean isActive() {
@@ -1137,30 +1246,5 @@ void setCurrentStreamForTests(EditLogOutputStream stream) {
JournalManager getManager() {
return manager;
}
-
- private EditLogInputStream getInProgressInputStream() throws IOException {
- return manager.getInProgressInputStream(segmentStartsAtTxId);
- }
- }
-
- /**
- * @return an EditLogInputStream that reads from the same log that
- * the edit log is currently writing. This is used from the BackupNode
- * during edits synchronization.
- * @throws IOException if no valid logs are available.
- */
- synchronized EditLogInputStream getInProgressFileInputStream()
- throws IOException {
- for (JournalAndStream jas : journals) {
- if (!jas.isActive()) continue;
- try {
- EditLogInputStream in = jas.getInProgressInputStream();
- if (in != null) return in;
- } catch (IOException ioe) {
- LOG.warn("Unable to get the in-progress input stream from " + jas,
- ioe);
- }
- }
- throw new IOException("No in-progress stream provided edits");
}
}
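
selectInputStreams() above builds its list by repeatedly asking selectStream() for the journal that can serve the longest run of transactions from the next needed txid. Below is a self-contained sketch of that "pick the best journal" step; Journal, transactionsFrom and selectBest are stand-ins defined only for this example.

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

// Stand-in for a journal: reports how many consecutive transactions it can
// serve starting at a given txid (0 means none).
interface Journal {
  long transactionsFrom(long fromTxId) throws IOException;
  String name();
}

public class BestJournalExample {
  // Mirrors FSEditLog.selectStream(): pick the journal offering the longest
  // run of transactions from fromTxId; journals that fail to answer are skipped.
  static Journal selectBest(List<Journal> journals, long fromTxId) {
    Journal best = null;
    long bestCount = 0;
    for (Journal j : journals) {
      long count;
      try {
        count = j.transactionsFrom(fromTxId);
      } catch (IOException ioe) {
        continue; // error reading this journal, just skip it
      }
      if (count > bestCount) {
        best = j;
        bestCount = count;
      }
    }
    return best; // null means no journal has transactions from fromTxId
  }

  public static void main(String[] args) {
    List<Journal> journals = Arrays.asList(
        journal("dirA", 50), journal("dirB", 200), journal("dirC", 0));
    System.out.println(selectBest(journals, 101).name()); // dirB
  }

  static Journal journal(final String name, final long count) {
    return new Journal() {
      public long transactionsFrom(long fromTxId) { return count; }
      public String name() { return name; }
    };
  }
}
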
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index db985691f6..991fd08c84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -27,7 +27,7 @@
import java.util.EnumMap;
import org.apache.hadoop.fs.permission.PermissionStatus;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
@@ -274,14 +274,14 @@ int loadEditRecords(int logVersion, EditLogInputStream in, boolean closeOnExit,
SetNSQuotaOp setNSQuotaOp = (SetNSQuotaOp)op;
fsDir.unprotectedSetQuota(setNSQuotaOp.src,
setNSQuotaOp.nsQuota,
- FSConstants.QUOTA_DONT_SET);
+ HdfsConstants.QUOTA_DONT_SET);
break;
}
case OP_CLEAR_NS_QUOTA: {
ClearNSQuotaOp clearNSQuotaOp = (ClearNSQuotaOp)op;
fsDir.unprotectedSetQuota(clearNSQuotaOp.src,
- FSConstants.QUOTA_RESET,
- FSConstants.QUOTA_DONT_SET);
+ HdfsConstants.QUOTA_RESET,
+ HdfsConstants.QUOTA_DONT_SET);
break;
}
@@ -435,7 +435,7 @@ private void check203UpgradeFailure(int logVersion, IOException ex)
// The editlog must be emptied by restarting the namenode, before proceeding
// with the upgrade.
if (Storage.is203LayoutVersion(logVersion)
- && logVersion != FSConstants.LAYOUT_VERSION) {
+ && logVersion != HdfsConstants.LAYOUT_VERSION) {
String msg = "During upgrade failed to load the editlog version "
+ logVersion + " from release 0.20.203. Please go back to the old "
+ " release and restart the namenode. This empties the editlog "
@@ -446,24 +446,6 @@ private void check203UpgradeFailure(int logVersion, IOException ex)
}
}
- static EditLogValidation validateEditLog(File file) throws IOException {
- EditLogFileInputStream in;
- try {
- in = new EditLogFileInputStream(file);
- } catch (LogHeaderCorruptException corrupt) {
- // If it's missing its header, this is equivalent to no transactions
- FSImage.LOG.warn("Log at " + file + " has no valid header",
- corrupt);
- return new EditLogValidation(0, 0);
- }
-
- try {
- return validateEditLog(in);
- } finally {
- IOUtils.closeStream(in);
- }
- }
-
/**
* Return the number of valid transactions in the stream. If the stream is
* truncated during the header, returns a value indicating that there are
@@ -473,12 +455,26 @@ static EditLogValidation validateEditLog(File file) throws IOException {
* if the log does not exist)
*/
static EditLogValidation validateEditLog(EditLogInputStream in) {
- long numValid = 0;
long lastPos = 0;
+ long firstTxId = HdfsConstants.INVALID_TXID;
+ long lastTxId = HdfsConstants.INVALID_TXID;
+ long numValid = 0;
try {
+ FSEditLogOp op = null;
while (true) {
lastPos = in.getPosition();
- if (in.readOp() == null) {
+ if ((op = in.readOp()) == null) {
+ break;
+ }
+ if (firstTxId == HdfsConstants.INVALID_TXID) {
+ firstTxId = op.txid;
+ }
+ if (lastTxId == HdfsConstants.INVALID_TXID
+ || op.txid == lastTxId + 1) {
+ lastTxId = op.txid;
+ } else {
+ FSImage.LOG.error("Out of order txid found. Found " + op.txid
+ + ", expected " + (lastTxId + 1));
break;
}
numValid++;
@@ -489,16 +485,33 @@ static EditLogValidation validateEditLog(EditLogInputStream in) {
FSImage.LOG.debug("Caught exception after reading " + numValid +
" ops from " + in + " while determining its valid length.", t);
}
- return new EditLogValidation(lastPos, numValid);
+ return new EditLogValidation(lastPos, firstTxId, lastTxId);
}
static class EditLogValidation {
- long validLength;
- long numTransactions;
-
- EditLogValidation(long validLength, long numTransactions) {
+ private long validLength;
+ private long startTxId;
+ private long endTxId;
+
+ EditLogValidation(long validLength,
+ long startTxId, long endTxId) {
this.validLength = validLength;
- this.numTransactions = numTransactions;
+ this.startTxId = startTxId;
+ this.endTxId = endTxId;
+ }
+
+ long getValidLength() { return validLength; }
+
+ long getStartTxId() { return startTxId; }
+
+ long getEndTxId() { return endTxId; }
+
+ long getNumTransactions() {
+ if (endTxId == HdfsConstants.INVALID_TXID
+ || startTxId == HdfsConstants.INVALID_TXID) {
+ return 0;
+ }
+ return (endTxId - startTxId) + 1;
}
}
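
EditLogValidation now derives the transaction count from the first/last txid range instead of counting separately, with the invalid-txid sentinel marking an empty or headerless log. A minimal sketch of that arithmetic; ValidationRange is hypothetical and -1 stands in for HdfsConstants.INVALID_TXID.

// Sketch of the EditLogValidation accounting: the number of valid
// transactions falls out of the first/last txid range, and a sentinel on
// either end means the log held no usable transactions.
public class ValidationRange {
  static final long INVALID_TXID = -1; // arbitrary sentinel for this sketch

  static long numTransactions(long startTxId, long endTxId) {
    if (startTxId == INVALID_TXID || endTxId == INVALID_TXID) {
      return 0;
    }
    return (endTxId - startTxId) + 1;
  }

  public static void main(String[] args) {
    System.out.println(numTransactions(101, 200));          // 100
    System.out.println(numTransactions(INVALID_TXID, 200)); // 0 (empty or headerless log)
  }
}
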
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index 6529c876c0..25f99b4081 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -30,7 +30,7 @@
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 8b259018f1..325e4b04ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -35,7 +35,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
@@ -44,9 +44,9 @@
import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
import org.apache.hadoop.hdfs.server.common.Util;
import static org.apache.hadoop.hdfs.server.common.Util.now;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
-import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.LoadPlan;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
@@ -70,7 +70,6 @@
public class FSImage implements Closeable {
protected static final Log LOG = LogFactory.getLog(FSImage.class.getName());
- protected FSNamesystem namesystem = null;
protected FSEditLog editLog = null;
private boolean isUpgradeFinalized = false;
@@ -82,38 +81,20 @@ public class FSImage implements Closeable {
*/
protected long lastAppliedTxId = 0;
- /**
- * URIs for importing an image from a checkpoint. In the default case,
- * URIs will represent directories.
- */
- private Collection<URI> checkpointDirs;
- private Collection<URI> checkpointEditsDirs;
-
final private Configuration conf;
private final NNStorageRetentionManager archivalManager;
- /**
- * Construct an FSImage.
- * @param conf Configuration
- * @see #FSImage(Configuration conf, FSNamesystem ns,
- * Collection imageDirs, Collection editsDirs)
- * @throws IOException if default directories are invalid.
- */
- public FSImage(Configuration conf) throws IOException {
- this(conf, (FSNamesystem)null);
- }
/**
* Construct an FSImage
* @param conf Configuration
- * @param ns The FSNamesystem using this image.
- * @see #FSImage(Configuration conf, FSNamesystem ns,
+ * @see #FSImage(Configuration conf,
* Collection imageDirs, Collection editsDirs)
* @throws IOException if default directories are invalid.
*/
- private FSImage(Configuration conf, FSNamesystem ns) throws IOException {
- this(conf, ns,
+ protected FSImage(Configuration conf) throws IOException {
+ this(conf,
FSNamesystem.getNamespaceDirs(conf),
FSNamesystem.getNamespaceEditsDirs(conf));
}
@@ -124,17 +105,14 @@ private FSImage(Configuration conf, FSNamesystem ns) throws IOException {
* Setup storage and initialize the edit log.
*
* @param conf Configuration
- * @param ns The FSNamesystem using this image.
* @param imageDirs Directories the image can be stored in.
* @param editsDirs Directories the editlog can be stored in.
* @throws IOException if directories are invalid.
*/
- protected FSImage(Configuration conf, FSNamesystem ns,
+ protected FSImage(Configuration conf,
Collection<URI> imageDirs, Collection<URI> editsDirs)
throws IOException {
this.conf = conf;
- setCheckpointDirectories(FSImage.getCheckpointDirs(conf, null),
- FSImage.getCheckpointEditsDirs(conf, null));
storage = new NNStorage(conf, imageDirs, editsDirs);
if(conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY,
@@ -143,31 +121,18 @@ protected FSImage(Configuration conf, FSNamesystem ns,
}
this.editLog = new FSEditLog(storage);
- setFSNamesystem(ns);
archivalManager = new NNStorageRetentionManager(conf, storage, editLog);
}
-
- protected FSNamesystem getFSNamesystem() {
- return namesystem;
- }
-
- void setFSNamesystem(FSNamesystem ns) {
- namesystem = ns;
- if (ns != null) {
- storage.setUpgradeManager(ns.upgradeManager);
- }
- }
- void setCheckpointDirectories(Collection dirs,
- Collection editsDirs) {
- checkpointDirs = dirs;
- checkpointEditsDirs = editsDirs;
- }
-
- void format(String clusterId) throws IOException {
+ void format(FSNamesystem fsn, String clusterId) throws IOException {
+ long fileCount = fsn.getTotalFiles();
+ // Expect 1 file, which is the root inode
+ Preconditions.checkState(fileCount == 1,
+ "FSImage.format should be called with an uninitialized namesystem, has " +
+ fileCount + " files");
storage.format(clusterId);
- saveFSImageInAllDirs(0);
+ saveFSImageInAllDirs(fsn, 0);
}
/**
@@ -179,7 +144,7 @@ void format(String clusterId) throws IOException {
* @throws IOException
* @return true if the image needs to be saved or false otherwise
*/
- boolean recoverTransitionRead(StartupOption startOpt)
+ boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target)
throws IOException {
assert startOpt != StartupOption.FORMAT :
"NameNode formatting should be performed before reading the image";
@@ -187,21 +152,14 @@ boolean recoverTransitionRead(StartupOption startOpt)
Collection<URI> imageDirs = storage.getImageDirectories();
Collection<URI> editsDirs = storage.getEditsDirectories();
+
// none of the data dirs exist
if((imageDirs.size() == 0 || editsDirs.size() == 0)
&& startOpt != StartupOption.IMPORT)
throw new IOException(
"All specified directories are not accessible or do not exist.");
- if(startOpt == StartupOption.IMPORT
- && (checkpointDirs == null || checkpointDirs.isEmpty()))
- throw new IOException("Cannot import image from a checkpoint. "
- + "\"dfs.namenode.checkpoint.dir\" is not set." );
-
- if(startOpt == StartupOption.IMPORT
- && (checkpointEditsDirs == null || checkpointEditsDirs.isEmpty()))
- throw new IOException("Cannot import image from a checkpoint. "
- + "\"dfs.namenode.checkpoint.dir\" is not set." );
+ storage.setUpgradeManager(target.upgradeManager);
// 1. For each data directory calculate its state and
// check whether all is consistent before transitioning.
@@ -227,11 +185,11 @@ boolean recoverTransitionRead(StartupOption startOpt)
}
if (startOpt != StartupOption.UPGRADE
&& layoutVersion < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION
- && layoutVersion != FSConstants.LAYOUT_VERSION) {
+ && layoutVersion != HdfsConstants.LAYOUT_VERSION) {
throw new IOException(
"\nFile system image contains an old layout version "
+ storage.getLayoutVersion() + ".\nAn upgrade to version "
- + FSConstants.LAYOUT_VERSION + " is required.\n"
+ + HdfsConstants.LAYOUT_VERSION + " is required.\n"
+ "Please restart NameNode with -upgrade option.");
}
@@ -261,10 +219,10 @@ boolean recoverTransitionRead(StartupOption startOpt)
// 3. Do transitions
switch(startOpt) {
case UPGRADE:
- doUpgrade();
+ doUpgrade(target);
return false; // upgrade saved image already
case IMPORT:
- doImportCheckpoint();
+ doImportCheckpoint(target);
return false; // import checkpoint saved image already
case ROLLBACK:
doRollback();
@@ -273,7 +231,7 @@ boolean recoverTransitionRead(StartupOption startOpt)
// just load the image
}
- return loadFSImage();
+ return loadFSImage(target);
}
/**
@@ -324,11 +282,11 @@ private boolean recoverStorageDirs(StartupOption startOpt,
return isFormatted;
}
- private void doUpgrade() throws IOException {
+ private void doUpgrade(FSNamesystem target) throws IOException {
if(storage.getDistributedUpgradeState()) {
// only distributed upgrade need to continue
// don't do version upgrade
- this.loadFSImage();
+ this.loadFSImage(target);
storage.initializeDistributedUpgrade();
return;
}
@@ -343,13 +301,13 @@ private void doUpgrade() throws IOException {
}
// load the latest image
- this.loadFSImage();
+ this.loadFSImage(target);
// Do upgrade for each directory
long oldCTime = storage.getCTime();
storage.cTime = now(); // generate new cTime for the state
int oldLV = storage.getLayoutVersion();
- storage.layoutVersion = FSConstants.LAYOUT_VERSION;
+ storage.layoutVersion = HdfsConstants.LAYOUT_VERSION;
List<StorageDirectory> errorSDs =
Collections.synchronizedList(new ArrayList<StorageDirectory>());
@@ -385,7 +343,7 @@ private void doUpgrade() throws IOException {
storage.reportErrorsOnDirectories(errorSDs);
errorSDs.clear();
- saveFSImageInAllDirs(editLog.getLastWrittenTxId());
+ saveFSImageInAllDirs(target, editLog.getLastWrittenTxId());
for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
StorageDirectory sd = it.next();
@@ -422,8 +380,8 @@ private void doRollback() throws IOException {
// a previous fs states in at least one of the storage directories.
// Directories that don't have previous state do not rollback
boolean canRollback = false;
- FSImage prevState = new FSImage(conf, getFSNamesystem());
- prevState.getStorage().layoutVersion = FSConstants.LAYOUT_VERSION;
+ FSImage prevState = new FSImage(conf);
+ prevState.getStorage().layoutVersion = HdfsConstants.LAYOUT_VERSION;
for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
StorageDirectory sd = it.next();
File prevDir = sd.getPreviousDir();
@@ -438,12 +396,12 @@ private void doRollback() throws IOException {
// read and verify consistency of the prev dir
prevState.getStorage().readPreviousVersionProperties(sd);
- if (prevState.getLayoutVersion() != FSConstants.LAYOUT_VERSION) {
+ if (prevState.getLayoutVersion() != HdfsConstants.LAYOUT_VERSION) {
throw new IOException(
"Cannot rollback to storage version " +
prevState.getLayoutVersion() +
" using this version of the NameNode, which uses storage version " +
- FSConstants.LAYOUT_VERSION + ". " +
+ HdfsConstants.LAYOUT_VERSION + ". " +
"Please use the previous version of HDFS to perform the rollback.");
}
canRollback = true;
@@ -504,19 +462,32 @@ private void doFinalize(StorageDirectory sd) throws IOException {
/**
* Load image from a checkpoint directory and save it into the current one.
+ * @param target the NameSystem to import into
* @throws IOException
*/
- void doImportCheckpoint() throws IOException {
- FSNamesystem fsNamesys = getFSNamesystem();
- FSImage ckptImage = new FSImage(conf, fsNamesys,
+ void doImportCheckpoint(FSNamesystem target) throws IOException {
+ Collection<URI> checkpointDirs =
+ FSImage.getCheckpointDirs(conf, null);
+ Collection<URI> checkpointEditsDirs =
+ FSImage.getCheckpointEditsDirs(conf, null);
+
+ if (checkpointDirs == null || checkpointDirs.isEmpty()) {
+ throw new IOException("Cannot import image from a checkpoint. "
+ + "\"dfs.namenode.checkpoint.dir\" is not set." );
+ }
+
+ if (checkpointEditsDirs == null || checkpointEditsDirs.isEmpty()) {
+ throw new IOException("Cannot import image from a checkpoint. "
+ + "\"dfs.namenode.checkpoint.dir\" is not set." );
+ }
+
+ FSImage realImage = target.getFSImage();
+ FSImage ckptImage = new FSImage(conf,
checkpointDirs, checkpointEditsDirs);
- // replace real image with the checkpoint image
- FSImage realImage = fsNamesys.getFSImage();
- assert realImage == this;
- fsNamesys.dir.fsImage = ckptImage;
+ target.dir.fsImage = ckptImage;
// load from the checkpoint dirs
try {
- ckptImage.recoverTransitionRead(StartupOption.REGULAR);
+ ckptImage.recoverTransitionRead(StartupOption.REGULAR, target);
} finally {
ckptImage.close();
}
@@ -524,10 +495,11 @@ void doImportCheckpoint() throws IOException {
realImage.getStorage().setStorageInfo(ckptImage.getStorage());
realImage.getEditLog().setNextTxId(ckptImage.getEditLog().getLastWrittenTxId()+1);
- fsNamesys.dir.fsImage = realImage;
+ target.dir.fsImage = realImage;
realImage.getStorage().setBlockPoolID(ckptImage.getBlockPoolID());
+
// and save it but keep the same checkpointTime
- saveNamespace();
+ saveNamespace(target);
getStorage().writeAll();
}
@@ -558,11 +530,11 @@ void openEditLog() throws IOException {
* Toss the current image and namesystem, reloading from the specified
* file.
*/
- void reloadFromImageFile(File file) throws IOException {
- namesystem.dir.reset();
+ void reloadFromImageFile(File file, FSNamesystem target) throws IOException {
+ target.dir.reset();
LOG.debug("Reloading namespace from " + file);
- loadFSImage(file);
+ loadFSImage(file, target);
}
/**
@@ -580,36 +552,42 @@ void reloadFromImageFile(File file) throws IOException {
* @return whether the image should be saved
* @throws IOException
*/
- boolean loadFSImage() throws IOException {
+ boolean loadFSImage(FSNamesystem target) throws IOException {
FSImageStorageInspector inspector = storage.readAndInspectDirs();
isUpgradeFinalized = inspector.isUpgradeFinalized();
-
+
+ FSImageStorageInspector.FSImageFile imageFile
+ = inspector.getLatestImage();
boolean needToSave = inspector.needToSave();
+
+ Iterable<EditLogInputStream> editStreams = null;
+
+ editLog.recoverUnclosedStreams();
+
+ if (LayoutVersion.supports(Feature.TXID_BASED_LAYOUT,
+ getLayoutVersion())) {
+ editStreams = editLog.selectInputStreams(imageFile.getCheckpointTxId() + 1,
+ inspector.getMaxSeenTxId());
+ } else {
+ editStreams = FSImagePreTransactionalStorageInspector
+ .getEditLogStreams(storage);
+ }
+
+ LOG.debug("Planning to load image :\n" + imageFile);
+ for (EditLogInputStream l : editStreams) {
+ LOG.debug("\t Planning to load edit stream: " + l);
+ }
- // Plan our load. This will throw if it's impossible to load from the
- // data that's available.
- LoadPlan loadPlan = inspector.createLoadPlan();
- LOG.debug("Planning to load image using following plan:\n" + loadPlan);
-
-
- // Recover from previous interrupted checkpoint, if any
- needToSave |= loadPlan.doRecovery();
-
- //
- // Load in bits
- //
- StorageDirectory sdForProperties =
- loadPlan.getStorageDirectoryForProperties();
- storage.readProperties(sdForProperties);
- File imageFile = loadPlan.getImageFile();
-
try {
+ StorageDirectory sdForProperties = imageFile.sd;
+ storage.readProperties(sdForProperties);
+
if (LayoutVersion.supports(Feature.TXID_BASED_LAYOUT,
getLayoutVersion())) {
// For txid-based layout, we should have a .md5 file
// next to the image file
- loadFSImage(imageFile);
+ loadFSImage(imageFile.getFile(), target);
} else if (LayoutVersion.supports(Feature.FSIMAGE_CHECKSUM,
getLayoutVersion())) {
// In 0.22, we have the checksum stored in the VERSION file.
@@ -621,17 +599,19 @@ boolean loadFSImage() throws IOException {
NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY +
" not set for storage directory " + sdForProperties.getRoot());
}
- loadFSImage(imageFile, new MD5Hash(md5));
+ loadFSImage(imageFile.getFile(), new MD5Hash(md5), target);
} else {
// We don't have any record of the md5sum
- loadFSImage(imageFile, null);
+ loadFSImage(imageFile.getFile(), null, target);
}
} catch (IOException ioe) {
- throw new IOException("Failed to load image from " + loadPlan.getImageFile(), ioe);
+ FSEditLog.closeAllStreams(editStreams);
+ throw new IOException("Failed to load image from " + imageFile, ioe);
}
- long numLoaded = loadEdits(loadPlan.getEditsFiles());
- needToSave |= needsResaveBasedOnStaleCheckpoint(imageFile, numLoaded);
+ long numLoaded = loadEdits(editStreams, target);
+ needToSave |= needsResaveBasedOnStaleCheckpoint(imageFile.getFile(),
+ numLoaded);
// update the txid for the edit log
editLog.setNextTxId(storage.getMostRecentCheckpointTxId() + numLoaded + 1);
@@ -663,26 +643,30 @@ private boolean needsResaveBasedOnStaleCheckpoint(
* Load the specified list of edit files into the image.
* @return the number of transactions loaded
*/
- protected long loadEdits(List editLogs) throws IOException {
- LOG.debug("About to load edits:\n " + Joiner.on("\n ").join(editLogs));
- protected long loadEdits(Iterable<EditLogInputStream> editStreams,
+ FSNamesystem target) throws IOException {
+ LOG.debug("About to load edits:\n " + Joiner.on("\n ").join(editStreams));
long startingTxId = getLastAppliedTxId() + 1;
-
- FSEditLogLoader loader = new FSEditLogLoader(namesystem);
int numLoaded = 0;
- // Load latest edits
- for (File edits : editLogs) {
- LOG.debug("Reading " + edits + " expecting start txid #" + startingTxId);
- EditLogFileInputStream editIn = new EditLogFileInputStream(edits);
- int thisNumLoaded = loader.loadFSEdits(editIn, startingTxId);
- startingTxId += thisNumLoaded;
- numLoaded += thisNumLoaded;
- lastAppliedTxId += thisNumLoaded;
- editIn.close();
+
+ try {
+ FSEditLogLoader loader = new FSEditLogLoader(target);
+
+ // Load latest edits
+ for (EditLogInputStream editIn : editStreams) {
+ LOG.info("Reading " + editIn + " expecting start txid #" + startingTxId);
+ int thisNumLoaded = loader.loadFSEdits(editIn, startingTxId);
+ startingTxId += thisNumLoaded;
+ numLoaded += thisNumLoaded;
+ lastAppliedTxId += thisNumLoaded;
+ }
+ } finally {
+ FSEditLog.closeAllStreams(editStreams);
}
// update the counts
- getFSNamesystem().dir.updateCountForINodeWithQuota();
+ target.dir.updateCountForINodeWithQuota();
return numLoaded;
}
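
A small sketch of the stream-based replay loop introduced above (a toy EditStream interface stands in for EditLogInputStream): every stream is applied in order at the txid where the previous one left off, and all streams are closed even when replay fails part-way, mirroring the closeAllStreams call in the finally block.

import java.io.Closeable;
import java.io.IOException;
import java.util.List;

class LoadEditsSketch {
  // Toy stand-in for an edit log input stream.
  interface EditStream extends Closeable {
    int apply(long expectedStartTxId) throws IOException; // returns #txns applied
  }

  static long loadEdits(List<EditStream> streams, long lastAppliedTxId)
      throws IOException {
    long startingTxId = lastAppliedTxId + 1;
    long numLoaded = 0;
    try {
      for (EditStream s : streams) {
        int n = s.apply(startingTxId);   // replay this segment
        startingTxId += n;               // the next segment must pick up here
        numLoaded += n;
      }
    } finally {
      // never leak open streams, even on failure
      for (EditStream s : streams) {
        try {
          s.close();
        } catch (IOException ignored) {
          // best-effort close
        }
      }
    }
    return numLoaded;
  }
}
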
@@ -691,13 +675,14 @@ protected long loadEdits(List editLogs) throws IOException {
* Load the image namespace from the given image file, verifying
* it against the MD5 sum stored in its associated .md5 file.
*/
- private void loadFSImage(File imageFile) throws IOException {
+ private void loadFSImage(File imageFile, FSNamesystem target)
+ throws IOException {
MD5Hash expectedMD5 = MD5FileUtils.readStoredMd5ForFile(imageFile);
if (expectedMD5 == null) {
throw new IOException("No MD5 file found corresponding to image file "
+ imageFile);
}
- loadFSImage(imageFile, expectedMD5);
+ loadFSImage(imageFile, expectedMD5, target);
}
/**
@@ -705,11 +690,12 @@ private void loadFSImage(File imageFile) throws IOException {
* filenames and blocks. Return whether we should
* "re-save" and consolidate the edit-logs
*/
- private void loadFSImage(File curFile, MD5Hash expectedMd5) throws IOException {
+ private void loadFSImage(File curFile, MD5Hash expectedMd5,
+ FSNamesystem target) throws IOException {
FSImageFormat.Loader loader = new FSImageFormat.Loader(
- conf, getFSNamesystem());
+ conf, target);
loader.load(curFile);
- namesystem.setBlockPoolId(this.getBlockPoolID());
+ target.setBlockPoolId(this.getBlockPoolID());
// Check that the image digest we loaded matches up with what
// we expected
@@ -730,13 +716,14 @@ private void loadFSImage(File curFile, MD5Hash expectedMd5) throws IOException {
/**
* Save the contents of the FS image to the file.
*/
- void saveFSImage(StorageDirectory sd, long txid) throws IOException {
+ void saveFSImage(FSNamesystem source, StorageDirectory sd, long txid)
+ throws IOException {
File newFile = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE_NEW, txid);
File dstFile = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, txid);
FSImageFormat.Saver saver = new FSImageFormat.Saver();
FSImageCompression compression = FSImageCompression.createCompression(conf);
- saver.save(newFile, txid, getFSNamesystem(), compression);
+ saver.save(newFile, txid, source, compression);
MD5FileUtils.saveMD5File(dstFile, saver.getSavedDigest());
storage.setMostRecentCheckpointTxId(txid);
@@ -757,8 +744,11 @@ private class FSImageSaver implements Runnable {
private StorageDirectory sd;
private List errorSDs;
private final long txid;
+ private final FSNamesystem source;
- FSImageSaver(StorageDirectory sd, List errorSDs, long txid) {
+ FSImageSaver(FSNamesystem source, StorageDirectory sd,
+ List errorSDs, long txid) {
+ this.source = source;
this.sd = sd;
this.errorSDs = errorSDs;
this.txid = txid;
@@ -766,7 +756,7 @@ private class FSImageSaver implements Runnable {
public void run() {
try {
- saveFSImage(sd, txid);
+ saveFSImage(source, sd, txid);
} catch (Throwable t) {
LOG.error("Unable to save image for " + sd.getRoot(), t);
errorSDs.add(sd);
@@ -795,7 +785,7 @@ private void waitForThreads(List threads) {
* Save the contents of the FS image to a new image file in each of the
* current storage directories.
*/
- void saveNamespace() throws IOException {
+ void saveNamespace(FSNamesystem source) throws IOException {
assert editLog != null : "editLog must be initialized";
storage.attemptRestoreRemovedStorage();
@@ -806,7 +796,7 @@ void saveNamespace() throws IOException {
}
long imageTxId = editLog.getLastWrittenTxId();
try {
- saveFSImageInAllDirs(imageTxId);
+ saveFSImageInAllDirs(source, imageTxId);
storage.writeAll();
} finally {
if (editLogWasOpen) {
@@ -818,7 +808,8 @@ void saveNamespace() throws IOException {
}
- protected void saveFSImageInAllDirs(long txid) throws IOException {
+ protected void saveFSImageInAllDirs(FSNamesystem source, long txid)
+ throws IOException {
if (storage.getNumStorageDirs(NameNodeDirType.IMAGE) == 0) {
throw new IOException("No image directories available!");
}
@@ -831,7 +822,7 @@ protected void saveFSImageInAllDirs(long txid) throws IOException {
for (Iterator<StorageDirectory> it
= storage.dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
StorageDirectory sd = it.next();
- FSImageSaver saver = new FSImageSaver(sd, errorSDs, txid);
+ FSImageSaver saver = new FSImageSaver(source, sd, errorSDs, txid);
Thread saveThread = new Thread(saver, saver.toString());
saveThreads.add(saveThread);
saveThread.start();
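
The FSImageSaver changes above keep the existing one-thread-per-image-directory pattern and simply thread the source namesystem through it. A stripped-down sketch of that pattern, with directory names standing in for StorageDirectory objects:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

class ParallelSaveSketch {
  static List<String> saveAll(List<String> imageDirs) throws InterruptedException {
    final List<String> errorDirs =
        Collections.synchronizedList(new ArrayList<String>());
    List<Thread> threads = new ArrayList<Thread>();
    for (final String dir : imageDirs) {
      Thread saver = new Thread(new Runnable() {
        public void run() {
          try {
            // ... write the image file under 'dir' ...
          } catch (Throwable err) {
            errorDirs.add(dir);          // remember the failed directory, keep going
          }
        }
      }, "FSImageSaver for " + dir);
      threads.add(saver);
      saver.start();
    }
    for (Thread saver : threads) {
      saver.join();                      // waitForThreads() equivalent
    }
    return errorDirs;                    // caller reports or disables failed dirs
  }
}

One failed directory therefore never aborts the save in the others; the errors are collected and handled after all savers have finished.
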
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 453985d917..c178e048b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -39,7 +39,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -555,9 +555,14 @@ void save(File newFile,
DigestOutputStream fos = new DigestOutputStream(fout, digester);
DataOutputStream out = new DataOutputStream(fos);
try {
- out.writeInt(FSConstants.LAYOUT_VERSION);
- out.writeInt(sourceNamesystem.getFSImage()
- .getStorage().getNamespaceID()); // TODO bad dependency
+ out.writeInt(HdfsConstants.LAYOUT_VERSION);
+ // We use the non-locked version of getNamespaceInfo here since
+ // the coordinating thread of saveNamespace already has read-locked
+ // the namespace for us. If we attempt to take another readlock
+ // from the actual saver thread, there's a potential of a
+ // fairness-related deadlock. See the comments on HDFS-2223.
+ out.writeInt(sourceNamesystem.unprotectedGetNamespaceInfo()
+ .getNamespaceID());
out.writeLong(fsDir.rootDir.numItemsInTree());
out.writeLong(sourceNamesystem.getGenerationStamp());
out.writeLong(txid);
@@ -568,7 +573,7 @@ void save(File newFile,
" using " + compression);
- byte[] byteStore = new byte[4*FSConstants.MAX_PATH_LENGTH];
+ byte[] byteStore = new byte[4*HdfsConstants.MAX_PATH_LENGTH];
ByteBuffer strbuf = ByteBuffer.wrap(byteStore);
// save the root
FSImageSerialization.saveINode2Image(fsDir.rootDir, out);
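
The HDFS-2223 comment above is about lock fairness. A self-contained JDK demonstration (not HDFS code) of the hazard being avoided: with a fair ReentrantReadWriteLock, a second reader queues behind a waiting writer even while the first reader still holds the lock, so a coordinator that holds the read lock and waits on a helper that tries to re-acquire it can deadlock.

import java.util.concurrent.locks.ReentrantReadWriteLock;

class FairReadLockDemo {
  public static void main(String[] args) throws InterruptedException {
    final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true); // fair

    lock.readLock().lock();               // coordinator holds the read lock

    Thread writer = new Thread(new Runnable() {
      public void run() {
        lock.writeLock().lock();          // queues behind the coordinator
      }
    });
    writer.setDaemon(true);
    writer.start();
    Thread.sleep(200);                    // let the writer enqueue

    Thread helper = new Thread(new Runnable() {
      public void run() {
        lock.readLock().lock();           // fair mode: blocks behind the writer
        lock.readLock().unlock();
      }
    });
    helper.setDaemon(true);
    helper.start();

    helper.join(1000);
    // Prints true: the helper never got the read lock. If the coordinator
    // now waited for the helper before releasing its own lock, both would hang.
    System.out.println("helper still blocked: " + helper.isAlive());
  }
}
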
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
index cec2eeff2d..91076ef5f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
@@ -32,6 +32,7 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
@@ -55,6 +56,7 @@ class FSImagePreTransactionalStorageInspector extends FSImageStorageInspector {
private boolean hasOutOfDateStorageDirs = false;
/* Flag set false if there are any "previous" directories found */
private boolean isUpgradeFinalized = true;
+ private boolean needToSaveAfterRecovery = false;
// Track the name and edits dir with the latest times
private long latestNameCheckpointTime = Long.MIN_VALUE;
@@ -139,15 +141,15 @@ static long readCheckpointTime(StorageDirectory sd) throws IOException {
boolean isUpgradeFinalized() {
return isUpgradeFinalized;
}
-
+
@Override
- LoadPlan createLoadPlan() throws IOException {
+ FSImageFile getLatestImage() throws IOException {
// We should have at least one image and one edits dirs
if (latestNameSD == null)
throw new IOException("Image file is not found in " + imageDirs);
if (latestEditsSD == null)
throw new IOException("Edits file is not found in " + editsDirs);
-
+
// Make sure we are loading image and edits from same checkpoint
if (latestNameCheckpointTime > latestEditsCheckpointTime
&& latestNameSD != latestEditsSD
@@ -168,92 +170,70 @@ LoadPlan createLoadPlan() throws IOException {
"image checkpoint time = " + latestNameCheckpointTime +
"edits checkpoint time = " + latestEditsCheckpointTime);
}
+
+ needToSaveAfterRecovery = doRecovery();
- return new PreTransactionalLoadPlan();
+ return new FSImageFile(latestNameSD,
+ NNStorage.getStorageFile(latestNameSD, NameNodeFile.IMAGE),
+ HdfsConstants.INVALID_TXID);
}
-
+
@Override
boolean needToSave() {
return hasOutOfDateStorageDirs ||
checkpointTimes.size() != 1 ||
- latestNameCheckpointTime > latestEditsCheckpointTime;
-
+ latestNameCheckpointTime > latestEditsCheckpointTime ||
+ needToSaveAfterRecovery;
}
- private class PreTransactionalLoadPlan extends LoadPlan {
-
- @Override
- boolean doRecovery() throws IOException {
- LOG.debug(
+ boolean doRecovery() throws IOException {
+ LOG.debug(
"Performing recovery in "+ latestNameSD + " and " + latestEditsSD);
- boolean needToSave = false;
- File curFile =
- NNStorage.getStorageFile(latestNameSD, NameNodeFile.IMAGE);
- File ckptFile =
- NNStorage.getStorageFile(latestNameSD, NameNodeFile.IMAGE_NEW);
-
- //
- // If we were in the midst of a checkpoint
- //
- if (ckptFile.exists()) {
- needToSave = true;
- if (NNStorage.getStorageFile(latestEditsSD, NameNodeFile.EDITS_NEW)
- .exists()) {
- //
- // checkpointing migth have uploaded a new
- // merged image, but we discard it here because we are
- // not sure whether the entire merged image was uploaded
- // before the namenode crashed.
- //
- if (!ckptFile.delete()) {
- throw new IOException("Unable to delete " + ckptFile);
- }
- } else {
- //
- // checkpointing was in progress when the namenode
- // shutdown. The fsimage.ckpt was created and the edits.new
- // file was moved to edits. We complete that checkpoint by
- // moving fsimage.new to fsimage. There is no need to
- // update the fstime file here. renameTo fails on Windows
- // if the destination file already exists.
- //
+ boolean needToSave = false;
+ File curFile =
+ NNStorage.getStorageFile(latestNameSD, NameNodeFile.IMAGE);
+ File ckptFile =
+ NNStorage.getStorageFile(latestNameSD, NameNodeFile.IMAGE_NEW);
+
+ //
+ // If we were in the midst of a checkpoint
+ //
+ if (ckptFile.exists()) {
+ needToSave = true;
+ if (NNStorage.getStorageFile(latestEditsSD, NameNodeFile.EDITS_NEW)
+ .exists()) {
+ //
+ // checkpointing might have uploaded a new
+ // merged image, but we discard it here because we are
+ // not sure whether the entire merged image was uploaded
+ // before the namenode crashed.
+ //
+ if (!ckptFile.delete()) {
+ throw new IOException("Unable to delete " + ckptFile);
+ }
+ } else {
+ //
+ // checkpointing was in progress when the namenode
+ // shut down. The fsimage.ckpt was created and the edits.new
+ // file was moved to edits. We complete that checkpoint by
+ // moving fsimage.new to fsimage. There is no need to
+ // update the fstime file here. renameTo fails on Windows
+ // if the destination file already exists.
+ //
+ if (!ckptFile.renameTo(curFile)) {
+ if (!curFile.delete())
+ LOG.warn("Unable to delete dir " + curFile + " before rename");
if (!ckptFile.renameTo(curFile)) {
- if (!curFile.delete())
- LOG.warn("Unable to delete dir " + curFile + " before rename");
- if (!ckptFile.renameTo(curFile)) {
- throw new IOException("Unable to rename " + ckptFile +
- " to " + curFile);
- }
+ throw new IOException("Unable to rename " + ckptFile +
+ " to " + curFile);
}
}
}
- return needToSave;
}
-
- @Override
- File getImageFile() {
- return NNStorage.getStorageFile(latestNameSD, NameNodeFile.IMAGE);
- }
-
- @Override
- List getEditsFiles() {
- if (latestNameCheckpointTime > latestEditsCheckpointTime) {
- // the image is already current, discard edits
- LOG.debug(
- "Name checkpoint time is newer than edits, not loading edits.");
- return Collections.emptyList();
- }
-
- return getEditsInStorageDir(latestEditsSD);
- }
-
- @Override
- StorageDirectory getStorageDirectoryForProperties() {
- return latestNameSD;
- }
+ return needToSave;
}
-
+
/**
* @return a list with the paths to EDITS and EDITS_NEW (if it exists)
* in a given storage directory.
@@ -269,4 +249,33 @@ static List getEditsInStorageDir(StorageDirectory sd) {
}
return files;
}
+
+ private List<File> getLatestEditsFiles() {
+ if (latestNameCheckpointTime > latestEditsCheckpointTime) {
+ // the image is already current, discard edits
+ LOG.debug(
+ "Name checkpoint time is newer than edits, not loading edits.");
+ return Collections.emptyList();
+ }
+
+ return getEditsInStorageDir(latestEditsSD);
+ }
+
+ @Override
+ long getMaxSeenTxId() {
+ return 0L;
+ }
+
+ static Iterable<EditLogInputStream> getEditLogStreams(NNStorage storage)
+ throws IOException {
+ FSImagePreTransactionalStorageInspector inspector
+ = new FSImagePreTransactionalStorageInspector();
+ storage.inspectStorageDirs(inspector);
+
+ List<EditLogInputStream> editStreams = new ArrayList<EditLogInputStream>();
+ for (File f : inspector.getLatestEditsFiles()) {
+ editStreams.add(new EditLogFileInputStream(f));
+ }
+ return editStreams;
+ }
}
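
The doRecovery logic hoisted out of the old LoadPlan above boils down to a couple of file moves. A minimal sketch using plain java.io.File and the pre-transactional file names (fsimage, fsimage.ckpt, edits.new); the return value plays the role of needToSaveAfterRecovery.

import java.io.File;
import java.io.IOException;

class CheckpointRecoverySketch {
  static boolean recover(File imageDir, File editsDir) throws IOException {
    File cur   = new File(imageDir, "fsimage");
    File ckpt  = new File(imageDir, "fsimage.ckpt");
    File edits = new File(editsDir, "edits.new");

    if (!ckpt.exists()) {
      return false;                      // no interrupted checkpoint, nothing to do
    }
    if (edits.exists()) {
      // The merged image may be only partially uploaded; discard it.
      if (!ckpt.delete()) {
        throw new IOException("Unable to delete " + ckpt);
      }
    } else if (!ckpt.renameTo(cur)) {
      // renameTo fails on Windows when the destination exists; delete and retry.
      if (!cur.delete()) {
        System.err.println("Unable to delete " + cur + " before rename");
      }
      if (!ckpt.renameTo(cur)) {
        throw new IOException("Unable to rename " + ckpt + " to " + cur);
      }
    }
    return true;                         // recovery acted, so re-save the image
  }
}
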
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
index 277fac0eb9..3ed8513636 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
@@ -35,7 +35,7 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageStorageInspector.java
index 65bfa0ac55..a7c2949f29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageStorageInspector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageStorageInspector.java
@@ -21,6 +21,7 @@
import java.io.IOException;
import java.util.List;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -43,60 +44,22 @@ abstract class FSImageStorageInspector {
abstract boolean isUpgradeFinalized();
/**
- * Create a plan to load the image from the set of inspected storage directories.
+ * Get the image files which should be loaded into the filesystem.
* @throws IOException if not enough files are available (eg no image found in any directory)
*/
- abstract LoadPlan createLoadPlan() throws IOException;
-
+ abstract FSImageFile getLatestImage() throws IOException;
+
+ /**
+ * Get the highest transaction id seen in the inspected storage; loading
+ * should reach at least this txid.
+ */
+ abstract long getMaxSeenTxId();
+
/**
* @return true if the directories are in such a state that the image should be re-saved
* following the load
*/
abstract boolean needToSave();
- /**
- * A plan to load the namespace from disk, providing the locations from which to load
- * the image and a set of edits files.
- */
- abstract static class LoadPlan {
- /**
- * Execute atomic move sequence in the chosen storage directories,
- * in order to recover from an interrupted checkpoint.
- * @return true if some recovery action was taken
- */
- abstract boolean doRecovery() throws IOException;
-
- /**
- * @return the file from which to load the image data
- */
- abstract File getImageFile();
-
- /**
- * @return a list of flies containing edits to replay
- */
- abstract List getEditsFiles();
-
- /**
- * @return the storage directory containing the VERSION file that should be
- * loaded.
- */
- abstract StorageDirectory getStorageDirectoryForProperties();
-
- @Override
- public String toString() {
- StringBuilder sb = new StringBuilder();
- sb.append("Will load image file: ").append(getImageFile()).append("\n");
- sb.append("Will load edits files:").append("\n");
- for (File f : getEditsFiles()) {
- sb.append(" ").append(f).append("\n");
- }
- sb.append("Will load metadata from: ")
- .append(getStorageDirectoryForProperties())
- .append("\n");
- return sb.toString();
- }
- }
-
/**
* Record of an image that has been located and had its filename parsed.
*/
@@ -106,7 +69,8 @@ static class FSImageFile {
private final File file;
FSImageFile(StorageDirectory sd, File file, long txId) {
- assert txId >= 0 : "Invalid txid on " + file +": " + txId;
+ assert txId >= 0 || txId == HdfsConstants.INVALID_TXID
+ : "Invalid txid on " + file +": " + txId;
this.sd = sd;
this.txId = txId;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java
index 0814a140b5..33d6e90f92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java
@@ -35,11 +35,10 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
-import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
@@ -55,9 +54,7 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
private boolean isUpgradeFinalized = true;
List<FSImageFile> foundImages = new ArrayList<FSImageFile>();
- List foundEditLogs = new ArrayList();
- SortedMap logGroups = new TreeMap();
- long maxSeenTxId = 0;
+ private long maxSeenTxId = 0;
private static final Pattern IMAGE_REGEX = Pattern.compile(
NameNodeFile.IMAGE.getName() + "_(\\d+)");
@@ -71,6 +68,8 @@ public void inspectDirectory(StorageDirectory sd) throws IOException {
return;
}
+ maxSeenTxId = Math.max(maxSeenTxId, NNStorage.readTransactionIdFile(sd));
+
File currentDir = sd.getCurrentDir();
File filesInStorage[];
try {
@@ -113,34 +112,10 @@ public void inspectDirectory(StorageDirectory sd) throws IOException {
LOG.warn("Unable to determine the max transaction ID seen by " + sd, ioe);
}
- List editLogs
- = FileJournalManager.matchEditLogs(filesInStorage);
- if (sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
- for (EditLogFile log : editLogs) {
- addEditLog(log);
- }
- } else if (!editLogs.isEmpty()){
- LOG.warn("Found the following edit log file(s) in " + sd +
- " even though it was not configured to store edits:\n" +
- " " + Joiner.on("\n ").join(editLogs));
-
- }
-
// set finalized flag
isUpgradeFinalized = isUpgradeFinalized && !sd.getPreviousDir().exists();
}
- private void addEditLog(EditLogFile foundEditLog) {
- foundEditLogs.add(foundEditLog);
- LogGroup group = logGroups.get(foundEditLog.getFirstTxId());
- if (group == null) {
- group = new LogGroup(foundEditLog.getFirstTxId());
- logGroups.put(foundEditLog.getFirstTxId(), group);
- }
- group.add(foundEditLog);
- }
-
-
@Override
public boolean isUpgradeFinalized() {
return isUpgradeFinalized;
@@ -151,9 +126,13 @@ public boolean isUpgradeFinalized() {
* If there are multiple storage directories which contain equal images
* the storage directory that was inspected first will be preferred.
*
- * Returns null if no images were found.
+ * @throws FileNotFoundException if no images are found.
*/
- FSImageFile getLatestImage() {
+ FSImageFile getLatestImage() throws IOException {
+ if (foundImages.isEmpty()) {
+ throw new FileNotFoundException("No valid image files found");
+ }
+
FSImageFile ret = null;
for (FSImageFile img : foundImages) {
if (ret == null || img.txId > ret.txId) {
@@ -167,349 +146,13 @@ public List getFoundImages() {
return ImmutableList.copyOf(foundImages);
}
- public List getEditLogFiles() {
- return ImmutableList.copyOf(foundEditLogs);
- }
-
- @Override
- public LoadPlan createLoadPlan() throws IOException {
- if (foundImages.isEmpty()) {
- throw new FileNotFoundException("No valid image files found");
- }
-
- FSImageFile recoveryImage = getLatestImage();
- LogLoadPlan logPlan = createLogLoadPlan(recoveryImage.txId, Long.MAX_VALUE);
-
- return new TransactionalLoadPlan(recoveryImage,
- logPlan);
- }
-
- /**
- * Plan which logs to load in order to bring the namespace up-to-date.
- * Transactions will be considered in the range (sinceTxId, maxTxId]
- *
- * @param sinceTxId the highest txid that is already loaded
- * (eg from the image checkpoint)
- * @param maxStartTxId ignore any log files that start after this txid
- */
- LogLoadPlan createLogLoadPlan(long sinceTxId, long maxStartTxId) throws IOException {
- long expectedTxId = sinceTxId + 1;
-
- List recoveryLogs = new ArrayList();
-
- SortedMap tailGroups = logGroups.tailMap(expectedTxId);
- if (logGroups.size() > tailGroups.size()) {
- LOG.debug("Excluded " + (logGroups.size() - tailGroups.size()) +
- " groups of logs because they start with a txid less than image " +
- "txid " + sinceTxId);
- }
-
- SortedMap usefulGroups;
- if (maxStartTxId > sinceTxId) {
- usefulGroups = tailGroups.headMap(maxStartTxId);
- } else {
- usefulGroups = new TreeMap();
- }
-
- if (usefulGroups.size() > tailGroups.size()) {
- LOG.debug("Excluded " + (tailGroups.size() - usefulGroups.size()) +
- " groups of logs because they start with a txid higher than max " +
- "txid " + sinceTxId);
- }
-
-
- for (Map.Entry entry : usefulGroups.entrySet()) {
- long logStartTxId = entry.getKey();
- LogGroup logGroup = entry.getValue();
-
- logGroup.planRecovery();
-
- if (expectedTxId != FSConstants.INVALID_TXID && logStartTxId != expectedTxId) {
- throw new IOException("Expected next log group would start at txid " +
- expectedTxId + " but starts at txid " + logStartTxId);
- }
-
- // We can pick any of the non-corrupt logs here
- recoveryLogs.add(logGroup.getBestNonCorruptLog());
-
- // If this log group was finalized, we know to expect the next
- // log group to start at the following txid (ie no gaps)
- if (logGroup.hasKnownLastTxId()) {
- expectedTxId = logGroup.getLastTxId() + 1;
- } else {
- // the log group was in-progress so we don't know what ID
- // the next group should start from.
- expectedTxId = FSConstants.INVALID_TXID;
- }
- }
-
- long lastLogGroupStartTxId = usefulGroups.isEmpty() ?
- 0 : usefulGroups.lastKey();
- if (maxSeenTxId > sinceTxId &&
- maxSeenTxId > lastLogGroupStartTxId) {
- String msg = "At least one storage directory indicated it has seen a " +
- "log segment starting at txid " + maxSeenTxId;
- if (usefulGroups.isEmpty()) {
- msg += " but there are no logs to load.";
- } else {
- msg += " but the most recent log file found starts with txid " +
- lastLogGroupStartTxId;
- }
- throw new IOException(msg);
- }
-
- return new LogLoadPlan(recoveryLogs,
- Lists.newArrayList(usefulGroups.values()));
-
- }
-
@Override
public boolean needToSave() {
return needToSave;
}
-
- /**
- * A group of logs that all start at the same txid.
- *
- * Handles determining which logs are corrupt and which should be considered
- * candidates for loading.
- */
- static class LogGroup {
- long startTxId;
- List logs = new ArrayList();;
- private Set endTxIds = new TreeSet();
- private boolean hasInProgress = false;
- private boolean hasFinalized = false;
-
- LogGroup(long startTxId) {
- this.startTxId = startTxId;
- }
-
- EditLogFile getBestNonCorruptLog() {
- // First look for non-corrupt finalized logs
- for (EditLogFile log : logs) {
- if (!log.isCorrupt() && !log.isInProgress()) {
- return log;
- }
- }
- // Then look for non-corrupt in-progress logs
- for (EditLogFile log : logs) {
- if (!log.isCorrupt()) {
- return log;
- }
- }
- // We should never get here, because we don't get to the planning stage
- // without calling planRecovery first, and if we've called planRecovery,
- // we would have already thrown if there were no non-corrupt logs!
- throw new IllegalStateException(
- "No non-corrupt logs for txid " + startTxId);
- }
-
- /**
- * @return true if we can determine the last txid in this log group.
- */
- boolean hasKnownLastTxId() {
- for (EditLogFile log : logs) {
- if (!log.isInProgress()) {
- return true;
- }
- }
- return false;
- }
-
- /**
- * @return the last txid included in the logs in this group
- * @throws IllegalStateException if it is unknown -
- * {@see #hasKnownLastTxId()}
- */
- long getLastTxId() {
- for (EditLogFile log : logs) {
- if (!log.isInProgress()) {
- return log.getLastTxId();
- }
- }
- throw new IllegalStateException("LogGroup only has in-progress logs");
- }
-
-
- void add(EditLogFile log) {
- assert log.getFirstTxId() == startTxId;
- logs.add(log);
-
- if (log.isInProgress()) {
- hasInProgress = true;
- } else {
- hasFinalized = true;
- endTxIds.add(log.getLastTxId());
- }
- }
-
- void planRecovery() throws IOException {
- assert hasInProgress || hasFinalized;
-
- checkConsistentEndTxIds();
-
- if (hasFinalized && hasInProgress) {
- planMixedLogRecovery();
- } else if (!hasFinalized && hasInProgress) {
- planAllInProgressRecovery();
- } else if (hasFinalized && !hasInProgress) {
- LOG.debug("No recovery necessary for logs starting at txid " +
- startTxId);
- }
- }
-
- /**
- * Recovery case for when some logs in the group were in-progress, and
- * others were finalized. This happens when one of the storage
- * directories fails.
- *
- * The in-progress logs in this case should be considered corrupt.
- */
- private void planMixedLogRecovery() throws IOException {
- for (EditLogFile log : logs) {
- if (log.isInProgress()) {
- LOG.warn("Log at " + log.getFile() + " is in progress, but " +
- "other logs starting at the same txid " + startTxId +
- " are finalized. Moving aside.");
- log.markCorrupt();
- }
- }
- }
-
- /**
- * Recovery case for when all of the logs in the group were in progress.
- * This happens if the NN completely crashes and restarts. In this case
- * we check the non-zero lengths of each log file, and any logs that are
- * less than the max of these lengths are considered corrupt.
- */
- private void planAllInProgressRecovery() throws IOException {
- // We only have in-progress logs. We need to figure out which logs have
- // the latest data to reccover them
- LOG.warn("Logs beginning at txid " + startTxId + " were are all " +
- "in-progress (probably truncated due to a previous NameNode " +
- "crash)");
- if (logs.size() == 1) {
- // Only one log, it's our only choice!
- EditLogFile log = logs.get(0);
- if (log.validateLog().numTransactions == 0) {
- // If it has no transactions, we should consider it corrupt just
- // to be conservative.
- // See comment below for similar case
- LOG.warn("Marking log at " + log.getFile() + " as corrupt since " +
- "it has no transactions in it.");
- log.markCorrupt();
- }
- return;
- }
-
- long maxValidTxnCount = Long.MIN_VALUE;
- for (EditLogFile log : logs) {
- long validTxnCount = log.validateLog().numTransactions;
- LOG.warn(" Log " + log.getFile() +
- " valid txns=" + validTxnCount +
- " valid len=" + log.validateLog().validLength);
- maxValidTxnCount = Math.max(maxValidTxnCount, validTxnCount);
- }
-
- for (EditLogFile log : logs) {
- long txns = log.validateLog().numTransactions;
- if (txns < maxValidTxnCount) {
- LOG.warn("Marking log at " + log.getFile() + " as corrupt since " +
- "it is has only " + txns + " valid txns whereas another " +
- "log has " + maxValidTxnCount);
- log.markCorrupt();
- } else if (txns == 0) {
- // this can happen if the NN crashes right after rolling a log
- // but before the START_LOG_SEGMENT txn is written. Since the log
- // is empty, we can just move it aside to its corrupt name.
- LOG.warn("Marking log at " + log.getFile() + " as corrupt since " +
- "it has no transactions in it.");
- log.markCorrupt();
- }
- }
- }
-
- /**
- * Check for the case when we have multiple finalized logs and they have
- * different ending transaction IDs. This violates an invariant that all
- * log directories should roll together. We should abort in this case.
- */
- private void checkConsistentEndTxIds() throws IOException {
- if (hasFinalized && endTxIds.size() > 1) {
- throw new IOException("More than one ending txid was found " +
- "for logs starting at txid " + startTxId + ". " +
- "Found: " + StringUtils.join(endTxIds, ','));
- }
- }
-
- void recover() throws IOException {
- for (EditLogFile log : logs) {
- if (log.isCorrupt()) {
- log.moveAsideCorruptFile();
- } else if (log.isInProgress()) {
- log.finalizeLog();
- }
- }
- }
- }
-
- static class TransactionalLoadPlan extends LoadPlan {
- final FSImageFile image;
- final LogLoadPlan logPlan;
-
- public TransactionalLoadPlan(FSImageFile image,
- LogLoadPlan logPlan) {
- super();
- this.image = image;
- this.logPlan = logPlan;
- }
-
- @Override
- boolean doRecovery() throws IOException {
- logPlan.doRecovery();
- return false;
- }
-
- @Override
- File getImageFile() {
- return image.getFile();
- }
-
- @Override
- List getEditsFiles() {
- return logPlan.getEditsFiles();
- }
-
- @Override
- StorageDirectory getStorageDirectoryForProperties() {
- return image.sd;
- }
- }
-
- static class LogLoadPlan {
- final List editLogs;
- final List logGroupsToRecover;
-
- LogLoadPlan(List editLogs,
- List logGroupsToRecover) {
- this.editLogs = editLogs;
- this.logGroupsToRecover = logGroupsToRecover;
- }
-
- public void doRecovery() throws IOException {
- for (LogGroup g : logGroupsToRecover) {
- g.recover();
- }
- }
-
- public List getEditsFiles() {
- List ret = new ArrayList();
- for (EditLogFile log : editLogs) {
- ret.add(log.getFile());
- }
- return ret;
- }
+ @Override
+ long getMaxSeenTxId() {
+ return maxSeenTxId;
}
}
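
With the LoadPlan machinery gone, the transactional inspector's job reduces to tracking the highest txid it has seen and handing back the newest image. A toy version of that selection (ImageFile is a hypothetical stand-in for FSImageFile):

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;

class LatestImageSketch {
  static class ImageFile {
    final String path;
    final long txId;
    ImageFile(String path, long txId) { this.path = path; this.txId = txId; }
  }

  static ImageFile getLatestImage(List<ImageFile> foundImages) throws IOException {
    if (foundImages.isEmpty()) {
      throw new FileNotFoundException("No valid image files found");
    }
    ImageFile ret = null;
    for (ImageFile img : foundImages) {
      if (ret == null || img.txId > ret.txId) {
        ret = img;                       // ties keep the first-inspected directory
      }
    }
    return ret;
  }
}
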
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index ceb557b4e6..116fa4826a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -78,10 +78,10 @@
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -99,9 +99,9 @@
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.common.Util;
@@ -134,6 +134,8 @@
import org.apache.hadoop.util.VersionInfo;
import org.mortbay.util.ajax.JSON;
+import com.google.common.base.Preconditions;
+
/***************************************************
* FSNamesystem does the actual bookkeeping work for the
* DataNode.
@@ -258,12 +260,43 @@ private static final void logAuditEvent(UserGroupInformation ugi,
// lock to protect FSNamesystem.
private ReentrantReadWriteLock fsLock;
+
/**
- * FSNamesystem constructor.
+ * Instantiates an FSNamesystem loaded from the image and edits
+ * directories specified in the passed Configuration.
+ *
+ * @param conf the Configuration which specifies the storage directories
+ * from which to load
+ * @return an FSNamesystem which contains the loaded namespace
+ * @throws IOException if loading fails
*/
- FSNamesystem(Configuration conf) throws IOException {
+ public static FSNamesystem loadFromDisk(Configuration conf) throws IOException {
+ FSImage fsImage = new FSImage(conf);
+ FSNamesystem namesystem = new FSNamesystem(conf, fsImage);
+
+ long loadStart = now();
+ StartupOption startOpt = NameNode.getStartupOption(conf);
+ namesystem.loadFSImage(startOpt, fsImage);
+ long timeTakenToLoadFSImage = now() - loadStart;
+ LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
+ NameNode.getNameNodeMetrics().setFsImageLoadTime(
+ (int) timeTakenToLoadFSImage);
+ return namesystem;
+ }
+
+ /**
+ * Create an FSNamesystem associated with the specified image.
+ *
+ * Note that this does not load any data off of disk -- if you would
+ * like that behavior, use {@link #loadFromDisk(Configuration)}
+
+ * @param fsImage The FSImage to associate with
+ * @param conf configuration
+ * @throws IOException on bad configuration
+ */
+ FSNamesystem(Configuration conf, FSImage fsImage) throws IOException {
try {
- initialize(conf, null);
+ initialize(conf, fsImage);
} catch(IOException e) {
LOG.error(getClass().getSimpleName() + " initialization failed.", e);
close();
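
A shape-only sketch of the factory split introduced above (hypothetical Image and Namesystem stand-ins, not the real classes): construction wires the namesystem to an image without touching disk, and loadFromDisk is the explicit, timed loading step.

import java.io.IOException;

class LoadFromDiskSketch {
  static class Image {
    void load(Namesystem target) throws IOException {
      // ... read fsimage, replay edits into 'target' ...
    }
  }

  static class Namesystem {
    final Image image;
    Namesystem(Image image) { this.image = image; }   // no disk I/O here
  }

  static Namesystem loadFromDisk() throws IOException {
    Image image = new Image();
    Namesystem ns = new Namesystem(image);
    long start = System.currentTimeMillis();
    image.load(ns);
    System.out.println("Finished loading FSImage in "
        + (System.currentTimeMillis() - start) + " msecs");
    return ns;
  }

  public static void main(String[] args) throws IOException {
    loadFromDisk();                      // callers always get a loaded namesystem
  }
}
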
@@ -279,29 +312,41 @@ private void initialize(Configuration conf, FSImage fsImage)
resourceRecheckInterval = conf.getLong(
DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,
DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT);
- nnResourceChecker = new NameNodeResourceChecker(conf);
- checkAvailableResources();
this.systemStart = now();
this.blockManager = new BlockManager(this, conf);
this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
this.fsLock = new ReentrantReadWriteLock(true); // fair locking
setConfigurationParameters(conf);
dtSecretManager = createDelegationTokenSecretManager(conf);
- this.registerMBean(); // register the MBean for the FSNamesystemState
- if(fsImage == null) {
- this.dir = new FSDirectory(this, conf);
- StartupOption startOpt = NameNode.getStartupOption(conf);
- this.dir.loadFSImage(startOpt);
- long timeTakenToLoadFSImage = now() - systemStart;
- LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
- NameNode.getNameNodeMetrics().setFsImageLoadTime(
- (int) timeTakenToLoadFSImage);
- } else {
- this.dir = new FSDirectory(fsImage, this, conf);
- }
+ this.dir = new FSDirectory(fsImage, this, conf);
this.safeMode = new SafeModeInfo(conf);
}
+ void loadFSImage(StartupOption startOpt, FSImage fsImage)
+ throws IOException {
+ // format before starting up if requested
+ if (startOpt == StartupOption.FORMAT) {
+
+ fsImage.format(this, fsImage.getStorage().determineClusterId());// reuse current id
+
+ startOpt = StartupOption.REGULAR;
+ }
+ boolean success = false;
+ try {
+ if (fsImage.recoverTransitionRead(startOpt, this)) {
+ fsImage.saveNamespace(this);
+ }
+ fsImage.openEditLog();
+
+ success = true;
+ } finally {
+ if (!success) {
+ fsImage.close();
+ }
+ }
+ dir.imageLoadComplete();
+ }
+
void activateSecretManager() throws IOException {
if (dtSecretManager != null) {
dtSecretManager.startThreads();
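
The success flag in loadFSImage above is the usual close-only-on-failure idiom: the resource is released if any step throws, but handed to the caller still open otherwise. In miniature:

import java.io.Closeable;
import java.io.IOException;

class CloseOnFailureSketch {
  static Closeable openAndLoad() throws IOException {
    final Closeable resource = new Closeable() {
      public void close() {
        System.out.println("closed");
      }
    };
    boolean success = false;
    try {
      // ... recoverTransitionRead / saveNamespace / openEditLog analogue,
      //     any of which may throw ...
      success = true;
      return resource;
    } finally {
      if (!success) {
        resource.close();                // only clean up when loading failed
      }
    }
  }
}
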
@@ -312,8 +357,13 @@ void activateSecretManager() throws IOException {
* Activate FSNamesystem daemons.
*/
void activate(Configuration conf) throws IOException {
+ this.registerMBean(); // register the MBean for the FSNamesystemState
+
writeLock();
try {
+ nnResourceChecker = new NameNodeResourceChecker(conf);
+ checkAvailableResources();
+
setBlockTotal();
blockManager.activate(conf);
@@ -396,36 +446,6 @@ public boolean hasReadOrWriteLock() {
return hasReadLock() || hasWriteLock();
}
- /**
- * dirs is a list of directories where the filesystem directory state
- * is stored
- */
- FSNamesystem(FSImage fsImage, Configuration conf) throws IOException {
- this.fsLock = new ReentrantReadWriteLock(true);
- this.blockManager = new BlockManager(this, conf);
- setConfigurationParameters(conf);
- this.dir = new FSDirectory(fsImage, this, conf);
- dtSecretManager = createDelegationTokenSecretManager(conf);
- }
-
- /**
- * Create FSNamesystem for {@link BackupNode}.
- * Should do everything that would be done for the NameNode,
- * except for loading the image.
- *
- * @param bnImage {@link BackupImage}
- * @param conf configuration
- * @throws IOException
- */
- FSNamesystem(Configuration conf, BackupImage bnImage) throws IOException {
- try {
- initialize(conf, bnImage);
- } catch(IOException e) {
- LOG.error(getClass().getSimpleName() + " initialization failed.", e);
- close();
- throw e;
- }
- }
/**
* Initializes some of the members from configuration
@@ -475,15 +495,22 @@ protected PermissionStatus getUpgradePermission() {
NamespaceInfo getNamespaceInfo() {
readLock();
try {
- return new NamespaceInfo(dir.fsImage.getStorage().getNamespaceID(),
- getClusterId(), getBlockPoolId(),
- dir.fsImage.getStorage().getCTime(),
- upgradeManager.getUpgradeVersion());
+ return unprotectedGetNamespaceInfo();
} finally {
readUnlock();
}
}
+ /**
+ * Version of {@see #getNamespaceInfo()} that is not protected by a lock.
+ */
+ NamespaceInfo unprotectedGetNamespaceInfo() {
+ return new NamespaceInfo(dir.fsImage.getStorage().getNamespaceID(),
+ getClusterId(), getBlockPoolId(),
+ dir.fsImage.getStorage().getCTime(),
+ upgradeManager.getUpgradeVersion());
+ }
+
/**
* Close down this file system manager.
* Causes heartbeat and lease daemons to stop; waits briefly for
@@ -2537,6 +2564,8 @@ private boolean nameNodeHasResourcesAvailable() {
* @throws IOException
*/
private void checkAvailableResources() throws IOException {
+ Preconditions.checkState(nnResourceChecker != null,
+ "nnResourceChecker not initialized");
hasResourcesAvailable = nnResourceChecker.hasAvailableDiskSpace();
}
@@ -2697,7 +2726,7 @@ void saveNamespace() throws AccessControlException, IOException {
throw new IOException("Safe mode should be turned ON " +
"in order to create namespace image.");
}
- getFSImage().saveNamespace();
+ getFSImage().saveNamespace(this);
LOG.info("New namespace image has been created.");
} finally {
readUnlock();
@@ -2756,7 +2785,7 @@ void finalizeUpgrade() throws IOException {
* not tracked because the name node is not intended to leave safe mode
* automatically in the case.
*
- * @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction)
+ * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction)
* @see SafeModeMonitor
*/
class SafeModeInfo {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
index b7587c0dd1..d8bd502597 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
@@ -38,7 +38,7 @@
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper;
@@ -120,7 +120,7 @@ public void doGet(HttpServletRequest request, HttpServletResponse response
new HdfsConfiguration(datanode.getConf());
final int socketTimeout = conf.getInt(
DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
- HdfsConstants.READ_TIMEOUT);
+ HdfsServerConstants.READ_TIMEOUT);
final SocketFactory socketFactory = NetUtils.getSocketFactory(conf,
ClientProtocol.class);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
index 991d7f5c5d..6e4c17161a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
@@ -23,11 +23,14 @@
import java.io.File;
import java.io.IOException;
import java.util.List;
+import java.util.HashMap;
import java.util.Comparator;
+import java.util.Collections;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
@@ -57,6 +60,9 @@ class FileJournalManager implements JournalManager {
private static final Pattern EDITS_INPROGRESS_REGEX = Pattern.compile(
NameNodeFile.EDITS_INPROGRESS.getName() + "_(\\d+)");
+ private File currentInProgress = null;
+ private long maxSeenTransaction = 0L;
+
@VisibleForTesting
StoragePurger purger
= new NNStorageRetentionManager.DeletionStoragePurger();
@@ -66,19 +72,20 @@ public FileJournalManager(StorageDirectory sd) {
}
@Override
- public EditLogOutputStream startLogSegment(long txid) throws IOException {
- File newInProgress = NNStorage.getInProgressEditsFile(sd, txid);
- EditLogOutputStream stm = new EditLogFileOutputStream(newInProgress,
+ synchronized public EditLogOutputStream startLogSegment(long txid)
+ throws IOException {
+ currentInProgress = NNStorage.getInProgressEditsFile(sd, txid);
+ EditLogOutputStream stm = new EditLogFileOutputStream(currentInProgress,
outputBufferCapacity);
stm.create();
return stm;
}
@Override
- public void finalizeLogSegment(long firstTxId, long lastTxId)
+ synchronized public void finalizeLogSegment(long firstTxId, long lastTxId)
throws IOException {
- File inprogressFile = NNStorage.getInProgressEditsFile(
- sd, firstTxId);
+ File inprogressFile = NNStorage.getInProgressEditsFile(sd, firstTxId);
+
File dstFile = NNStorage.getFinalizedEditsFile(
sd, firstTxId, lastTxId);
LOG.debug("Finalizing edits file " + inprogressFile + " -> " + dstFile);
@@ -89,6 +96,9 @@ public void finalizeLogSegment(long firstTxId, long lastTxId)
if (!inprogressFile.renameTo(dstFile)) {
throw new IOException("Unable to finalize edits file " + inprogressFile);
}
+ if (inprogressFile.equals(currentInProgress)) {
+ currentInProgress = null;
+ }
}
@VisibleForTesting
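
finalizeLogSegment above is essentially a rename from the in-progress name to a name carrying both endpoints of the txid range, plus forgetting currentInProgress when it was the segment being finalized. A sketch with an assumed naming scheme (edits_inprogress_N to edits_N-M, which mirrors but is not guaranteed to match the exact on-disk format):

import java.io.File;
import java.io.IOException;

class FinalizeSegmentSketch {
  static File finalizeSegment(File dir, long firstTxId, long lastTxId)
      throws IOException {
    File inProgress = new File(dir, "edits_inprogress_" + firstTxId);
    File finalized  = new File(dir, "edits_" + firstTxId + "-" + lastTxId);
    if (finalized.exists()) {
      throw new IOException("Destination " + finalized + " already exists");
    }
    if (!inProgress.renameTo(finalized)) {
      throw new IOException("Unable to finalize edits file " + inProgress);
    }
    return finalized;
  }
}
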
@@ -97,12 +107,7 @@ public StorageDirectory getStorageDirectory() {
}
@Override
- public String toString() {
- return "FileJournalManager for storage directory " + sd;
- }
-
- @Override
- public void setOutputBufferCapacity(int size) {
+ synchronized public void setOutputBufferCapacity(int size) {
this.outputBufferCapacity = size;
}
@@ -120,13 +125,6 @@ public void purgeLogsOlderThan(long minTxIdToKeep)
}
}
- @Override
- public EditLogInputStream getInProgressInputStream(long segmentStartsAtTxId)
- throws IOException {
- File f = NNStorage.getInProgressEditsFile(sd, segmentStartsAtTxId);
- return new EditLogFileInputStream(f);
- }
-
/**
* Find all editlog segments starting at or above the given txid.
* @param fromTxId the txnid which to start looking
@@ -178,17 +176,156 @@ static List matchEditLogs(File[] filesInStorage) {
try {
long startTxId = Long.valueOf(inProgressEditsMatch.group(1));
ret.add(
- new EditLogFile(f, startTxId, EditLogFile.UNKNOWN_END));
+ new EditLogFile(f, startTxId, startTxId, true));
} catch (NumberFormatException nfe) {
LOG.error("In-progress edits file " + f + " has improperly " +
"formatted transaction ID");
// skip
- }
+ }
}
}
return ret;
}
+ @Override
+ synchronized public EditLogInputStream getInputStream(long fromTxId)
+ throws IOException {
+ for (EditLogFile elf : getLogFiles(fromTxId)) {
+ if (elf.getFirstTxId() == fromTxId) {
+ if (elf.isInProgress()) {
+ elf.validateLog();
+ }
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Returning edit stream reading from " + elf);
+ }
+ return new EditLogFileInputStream(elf.getFile(),
+ elf.getFirstTxId(), elf.getLastTxId());
+ }
+ }
+
+ throw new IOException("Cannot find editlog file with " + fromTxId
+ + " as first first txid");
+ }
+
+ @Override
+ public long getNumberOfTransactions(long fromTxId)
+ throws IOException, CorruptionException {
+ long numTxns = 0L;
+
+ for (EditLogFile elf : getLogFiles(fromTxId)) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Counting " + elf);
+ }
+ if (elf.getFirstTxId() > fromTxId) { // there must be a gap
+ LOG.warn("Gap in transactions in " + sd.getRoot() + ". Gap is "
+ + fromTxId + " - " + (elf.getFirstTxId() - 1));
+ break;
+ } else if (fromTxId == elf.getFirstTxId()) {
+ if (elf.isInProgress()) {
+ elf.validateLog();
+ }
+
+ if (elf.isCorrupt()) {
+ break;
+ }
+ fromTxId = elf.getLastTxId() + 1;
+ numTxns += fromTxId - elf.getFirstTxId();
+
+ if (elf.isInProgress()) {
+ break;
+ }
+ } // else skip
+ }
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Journal " + this + " has " + numTxns
+ + " txns from " + fromTxId);
+ }
+
+ long max = findMaxTransaction();
+ // fromTxId should be greater than max, as it points to the next
+ // transaction we should expect to find. If it is less than or equal
+ // to max, it means that a transaction with txid == max has not been found
+ if (numTxns == 0 && fromTxId <= max) {
+ String error = String.format("Gap in transactions, max txnid is %d"
+ + ", 0 txns from %d", max, fromTxId);
+ LOG.error(error);
+ throw new CorruptionException(error);
+ }
+
+ return numTxns;
+ }
+
+ @Override
+ synchronized public void recoverUnfinalizedSegments() throws IOException {
+ File currentDir = sd.getCurrentDir();
+ List<EditLogFile> allLogFiles = matchEditLogs(currentDir.listFiles());
+
+ // make sure journal is aware of max seen transaction before moving corrupt
+ // files aside
+ findMaxTransaction();
+
+ for (EditLogFile elf : allLogFiles) {
+ if (elf.getFile().equals(currentInProgress)) {
+ continue;
+ }
+ if (elf.isInProgress()) {
+ elf.validateLog();
+
+ if (elf.isCorrupt()) {
+ elf.moveAsideCorruptFile();
+ continue;
+ }
+ finalizeLogSegment(elf.getFirstTxId(), elf.getLastTxId());
+ }
+ }
+ }
+
+ private List<EditLogFile> getLogFiles(long fromTxId) throws IOException {
+ File currentDir = sd.getCurrentDir();
+ List<EditLogFile> allLogFiles = matchEditLogs(currentDir.listFiles());
+ List<EditLogFile> logFiles = Lists.newArrayList();
+
+ for (EditLogFile elf : allLogFiles) {
+ if (fromTxId > elf.getFirstTxId()
+ && fromTxId <= elf.getLastTxId()) {
+ throw new IOException("Asked for fromTxId " + fromTxId
+ + " which is in middle of file " + elf.file);
+ }
+ if (fromTxId <= elf.getFirstTxId()) {
+ logFiles.add(elf);
+ }
+ }
+
+ Collections.sort(logFiles, EditLogFile.COMPARE_BY_START_TXID);
+
+ return logFiles;
+ }
+
+ /**
+ * Find the maximum transaction in the journal.
+ * This gets stored in a member variable, as corrupt edit logs
+ * will be moved aside, but we still need to remember their first
+ * transaction id in the case that it was the maximum transaction in
+ * the journal.
+ */
+ private long findMaxTransaction()
+ throws IOException {
+ for (EditLogFile elf : getLogFiles(0)) {
+ if (elf.isInProgress()) {
+ maxSeenTransaction = Math.max(elf.getFirstTxId(), maxSeenTransaction);
+ elf.validateLog();
+ }
+ maxSeenTransaction = Math.max(elf.getLastTxId(), maxSeenTransaction);
+ }
+ return maxSeenTransaction;
+ }
+
+ @Override
+ public String toString() {
+ return String.format("FileJournalManager(root=%s)", sd.getRoot());
+ }
+
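
getNumberOfTransactions and findMaxTransaction above walk the segments in txid order, add up contiguous ranges, and treat a shortfall against the highest txid ever seen as corruption. A compact model of that accounting, where Segment is a hypothetical stand-in for EditLogFile:

import java.io.IOException;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

class TxnCountSketch {
  static class Segment {
    final long firstTxId, lastTxId;
    Segment(long first, long last) { this.firstTxId = first; this.lastTxId = last; }
  }

  static long countFrom(long fromTxId, List<Segment> segments) throws IOException {
    Collections.sort(segments, new Comparator<Segment>() {
      public int compare(Segment a, Segment b) {
        return Long.compare(a.firstTxId, b.firstTxId);
      }
    });
    long maxSeen = 0;
    for (Segment s : segments) {
      maxSeen = Math.max(maxSeen, s.lastTxId);   // findMaxTransaction analogue
    }
    long numTxns = 0;
    long next = fromTxId;
    for (Segment s : segments) {
      if (s.firstTxId > next) {
        break;                           // gap: txids next .. firstTxId-1 are missing
      } else if (s.firstTxId == next) {
        numTxns += s.lastTxId - s.firstTxId + 1;
        next = s.lastTxId + 1;
      }                                  // segments older than fromTxId are skipped
    }
    if (numTxns == 0 && fromTxId <= maxSeen) {
      // Something recorded txids up to maxSeen, yet nothing is loadable from here.
      throw new IOException("Gap in transactions, max txnid is " + maxSeen
          + ", 0 txns from " + fromTxId);
    }
    return numTxns;
  }
}
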
/**
* Record of an edit log that has been located and had its filename parsed.
*/
@@ -196,12 +333,10 @@ static class EditLogFile {
private File file;
private final long firstTxId;
private long lastTxId;
-
- private EditLogValidation cachedValidation = null;
+
private boolean isCorrupt = false;
-
- static final long UNKNOWN_END = -1;
-
+ private final boolean isInProgress;
+
final static Comparator<EditLogFile> COMPARE_BY_START_TXID
= new Comparator<EditLogFile>() {
public int compare(EditLogFile a, EditLogFile b) {
@@ -214,30 +349,24 @@ public int compare(EditLogFile a, EditLogFile b) {
EditLogFile(File file,
long firstTxId, long lastTxId) {
- assert lastTxId == UNKNOWN_END || lastTxId >= firstTxId;
- assert firstTxId > 0;
+ this(file, firstTxId, lastTxId, false);
+ assert (lastTxId != HdfsConstants.INVALID_TXID)
+ && (lastTxId >= firstTxId);
+ }
+
+ EditLogFile(File file, long firstTxId,
+ long lastTxId, boolean isInProgress) {
+ assert (lastTxId == HdfsConstants.INVALID_TXID && isInProgress)
+ || (lastTxId != HdfsConstants.INVALID_TXID && lastTxId >= firstTxId);
+ assert (firstTxId > 0) || (firstTxId == HdfsConstants.INVALID_TXID);
assert file != null;
this.firstTxId = firstTxId;
this.lastTxId = lastTxId;
this.file = file;
+ this.isInProgress = isInProgress;
}
- public void finalizeLog() throws IOException {
- long numTransactions = validateLog().numTransactions;
- long lastTxId = firstTxId + numTransactions - 1;
- File dst = new File(file.getParentFile(),
- NNStorage.getFinalizedEditsFileName(firstTxId, lastTxId));
- LOG.info("Finalizing edits log " + file + " by renaming to "
- + dst.getName());
- if (!file.renameTo(dst)) {
- throw new IOException("Couldn't finalize log " +
- file + " to " + dst);
- }
- this.lastTxId = lastTxId;
- file = dst;
- }
-
long getFirstTxId() {
return firstTxId;
}
@@ -246,15 +375,22 @@ long getLastTxId() {
return lastTxId;
}
- EditLogValidation validateLog() throws IOException {
- if (cachedValidation == null) {
- cachedValidation = EditLogFileInputStream.validateEditLog(file);
+ /**
+ * Count the number of valid transactions in a log.
+ * This will update the lastTxId of the EditLogFile or
+ * mark it as corrupt if it is.
+ */
+ void validateLog() throws IOException {
+ EditLogValidation val = EditLogFileInputStream.validateEditLog(file);
+ if (val.getNumTransactions() == 0) {
+ markCorrupt();
+ } else {
+ this.lastTxId = val.getEndTxId();
}
- return cachedValidation;
}
boolean isInProgress() {
- return (lastTxId == UNKNOWN_END);
+ return isInProgress;
}
File getFile() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
index 3831b4580f..8476e27cdc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
@@ -29,7 +29,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.security.UserGroupInformation;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetDelegationTokenServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetDelegationTokenServlet.java
index 2c0f81abc5..4fc9dcca63 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetDelegationTokenServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetDelegationTokenServlet.java
@@ -75,7 +75,7 @@ public Void run() throws Exception {
+ ":" + NameNode.getAddress(conf).getPort();
Token<DelegationTokenIdentifier> token =
- nn.getDelegationToken(new Text(renewerFinal));
+ nn.getRpcServer().getDelegationToken(new Text(renewerFinal));
if(token == null) {
throw new Exception("couldn't get the token for " +s);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
index 7663ecff76..2440c4dd12 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
@@ -24,7 +24,7 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
/**
* I-node for file being written.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
index d62aaa7e5a..8440fe049b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
@@ -41,6 +41,25 @@ interface JournalManager {
*/
void finalizeLogSegment(long firstTxId, long lastTxId) throws IOException;
+ /**
+ * Get the input stream starting with fromTxnId from this journal manager
+ * @param fromTxnId the first transaction id we want to read
+ * @return the stream starting with transaction fromTxnId
+ * @throws IOException if a stream cannot be found.
+ */
+ EditLogInputStream getInputStream(long fromTxnId) throws IOException;
+
+ /**
+ * Get the number of transactions contiguously available from fromTxnId.
+ *
+ * @param fromTxnId Transaction id to count from
+ * @return The number of transactions available from fromTxnId
+ * @throws IOException if the journal cannot be read.
+ * @throws CorruptionException if there is a gap in the journal at fromTxnId.
+ */
+ long getNumberOfTransactions(long fromTxnId)
+ throws IOException, CorruptionException;
+
/**
* Set the amount of memory that this stream should use to buffer edits
*/
@@ -59,10 +78,21 @@ void purgeLogsOlderThan(long minTxIdToKeep)
throws IOException;
/**
- * @return an EditLogInputStream that reads from the same log that
- * the edit log is currently writing. May return null if this journal
- * manager does not support this operation.
- */
- EditLogInputStream getInProgressInputStream(long segmentStartsAtTxId)
- throws IOException;
+ * Recover segments which have not been finalized.
+ */
+ void recoverUnfinalizedSegments() throws IOException;
+
+ /**
+ * Indicate that a journal cannot be used to load a certain range of
+ * edits.
+ * This exception occurs in the case of a gap in the transactions, or a
+ * corrupt edit file.
+ */
+ public static class CorruptionException extends IOException {
+ static final long serialVersionUID = -4687802717006172702L;
+
+ public CorruptionException(String reason) {
+ super(reason);
+ }
+ }
}
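/*
 * Editor's note: a hedged sketch (not part of the patch) of the calling
 * pattern the two new JournalManager methods enable: ask how many
 * transactions are contiguously available from a start txid, then open a
 * stream at that txid, treating CorruptionException (which extends
 * IOException) as "this journal has a gap or corrupt file". Only the two
 * method names come from the interface above; the Mini* types and the
 * surrounding class are illustrative stand-ins.
 */
import java.io.IOException;

class JournalReaderSketch {

  /** Stand-in for EditLogInputStream; its details don't matter for the pattern. */
  interface MiniEditLogInputStream {}

  /** Trimmed-down mirror of the two JournalManager methods added above. */
  interface MiniJournalManager {
    MiniEditLogInputStream getInputStream(long fromTxnId) throws IOException;
    long getNumberOfTransactions(long fromTxnId) throws IOException;
  }

  /**
   * Open a stream at fromTxnId if this journal can serve it, or return null
   * when the journal has nothing readable, a gap, or corruption at that point.
   */
  static MiniEditLogInputStream openIfAvailable(MiniJournalManager jm,
                                                long fromTxnId) {
    try {
      long available = jm.getNumberOfTransactions(fromTxnId);
      if (available <= 0) {
        return null;                          // nothing readable from here
      }
      return jm.getInputStream(fromTxnId);    // stream begins exactly at fromTxnId
    } catch (IOException e) {
      // A CorruptionException also lands here, so this journal is simply skipped.
      return null;
    }
  }

  public static void main(String[] args) {
    MiniJournalManager jm = new MiniJournalManager() {
      public MiniEditLogInputStream getInputStream(long fromTxnId) {
        return new MiniEditLogInputStream() {};
      }
      public long getNumberOfTransactions(long fromTxnId) {
        return fromTxnId <= 100 ? 100 - fromTxnId + 1 : 0;  // pretend txids 1..100 exist
      }
    };
    System.out.println(openIfAvailable(jm, 50) != null);    // true
    System.out.println(openIfAvailable(jm, 101) != null);   // false
  }
}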
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index 257d37e0cb..44857739b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -32,8 +32,8 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import static org.apache.hadoop.hdfs.server.common.Util.now;
@@ -65,8 +65,8 @@ public class LeaseManager {
private final FSNamesystem fsnamesystem;
- private long softLimit = FSConstants.LEASE_SOFTLIMIT_PERIOD;
- private long hardLimit = FSConstants.LEASE_HARDLIMIT_PERIOD;
+ private long softLimit = HdfsConstants.LEASE_SOFTLIMIT_PERIOD;
+ private long hardLimit = HdfsConstants.LEASE_HARDLIMIT_PERIOD;
//
// Used for handling lock-leases
@@ -379,7 +379,7 @@ public void run() {
try {
- Thread.sleep(HdfsConstants.NAMENODE_LEASE_RECHECK_INTERVAL);
+ Thread.sleep(HdfsServerConstants.NAMENODE_LEASE_RECHECK_INTERVAL);
} catch(InterruptedException ie) {
if (LOG.isDebugEnabled()) {
LOG.debug(name + " is interrupted", ie);
@@ -409,7 +409,7 @@ private synchronized void checkLeases() {
oldest.getPaths().toArray(leasePaths);
for(String p : leasePaths) {
try {
- if(fsnamesystem.internalReleaseLease(oldest, p, HdfsConstants.NAMENODE_LEASE_HOLDER)) {
+ if(fsnamesystem.internalReleaseLease(oldest, p, HdfsServerConstants.NAMENODE_LEASE_HOLDER)) {
LOG.info("Lease recovery for file " + p +
" is complete. File closed.");
removing.add(p);
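/*
 * Editor's note: a hedged illustration (not part of the patch) of the rename
 * pattern in this hunk and several others: constants that clients also need
 * stay in the protocol-side HdfsConstants, while NameNode-internal constants
 * move to HdfsServerConstants. The Mini* classes and all values below are
 * illustrative stand-ins, not the real Hadoop definitions.
 */
class ConstantsSplitSketch {
  /** Stand-in for org.apache.hadoop.hdfs.protocol.HdfsConstants (client-visible). */
  static class MiniHdfsConstants {
    static final long LEASE_SOFTLIMIT_PERIOD = 60L * 1000;        // illustrative
    static final long LEASE_HARDLIMIT_PERIOD = 60L * 60 * 1000;   // illustrative
  }

  /** Stand-in for o.a.h.hdfs.server.common.HdfsServerConstants (NameNode-only). */
  static class MiniHdfsServerConstants {
    static final long NAMENODE_LEASE_RECHECK_INTERVAL = 2000;     // illustrative
    static final String NAMENODE_LEASE_HOLDER = "HDFS_NameNode";  // illustrative
  }

  /** A lease is past its hard limit once it has been idle longer than the limit. */
  static boolean isHardLimitExpired(long lastRenewedMillis, long nowMillis) {
    return nowMillis - lastRenewedMillis > MiniHdfsConstants.LEASE_HARDLIMIT_PERIOD;
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    // A lease renewed two hours ago is past the (illustrative) one-hour hard
    // limit, so a monitor waking every NAMENODE_LEASE_RECHECK_INTERVAL ms
    // would hand it to lease recovery under NAMENODE_LEASE_HOLDER.
    System.out.println(isHardLimitExpired(now - 2L * 60 * 60 * 1000, now));  // true
    System.out.println(MiniHdfsServerConstants.NAMENODE_LEASE_HOLDER);
  }
}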
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index 00461e2fb3..869922abb2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -42,11 +42,11 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.UpgradeManager;
@@ -126,7 +126,7 @@ public boolean isOfType(StorageDirType type) {
* recent fsimage file. This does not include any transactions
* that have since been written to the edit log.
*/
- protected long mostRecentCheckpointTxId = FSConstants.INVALID_TXID;
+ protected long mostRecentCheckpointTxId = HdfsConstants.INVALID_TXID;
/**
* list of failed (and thus removed) storages
@@ -501,7 +501,7 @@ private void format(StorageDirectory sd) throws IOException {
* Format all available storage directories.
*/
public void format(String clusterId) throws IOException {
- this.layoutVersion = FSConstants.LAYOUT_VERSION;
+ this.layoutVersion = HdfsConstants.LAYOUT_VERSION;
this.namespaceID = newNamespaceID();
this.clusterID = clusterId;
this.blockpoolID = newBlockPoolID();
@@ -574,7 +574,7 @@ private void setDeprecatedPropertiesForUpgrade(Properties props) {
* This should only be used during upgrades.
*/
String getDeprecatedProperty(String prop) {
- assert getLayoutVersion() > FSConstants.LAYOUT_VERSION :
+ assert getLayoutVersion() > HdfsConstants.LAYOUT_VERSION :
"getDeprecatedProperty should only be done when loading " +
"storage from past versions during upgrade.";
return deprecatedProperties.get(prop);
@@ -764,7 +764,7 @@ void verifyDistributedUpgradeProgress(StartupOption startOpt
if(upgradeManager.getDistributedUpgrades() != null)
throw new IOException("\n Distributed upgrade for NameNode version "
+ upgradeManager.getUpgradeVersion()
- + " to current LV " + FSConstants.LAYOUT_VERSION
+ + " to current LV " + HdfsConstants.LAYOUT_VERSION
+ " is required.\n Please restart NameNode"
+ " with -upgrade option.");
}
@@ -780,7 +780,7 @@ void initializeDistributedUpgrade() throws IOException {
writeAll();
LOG.info("\n Distributed upgrade for NameNode version "
+ upgradeManager.getUpgradeVersion() + " to current LV "
- + FSConstants.LAYOUT_VERSION + " is initialized.");
+ + HdfsConstants.LAYOUT_VERSION + " is initialized.");
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index fe32cdd63d..ed972c8f96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -21,9 +21,7 @@
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
-import java.util.Arrays;
import java.util.Collection;
-import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
@@ -34,82 +32,37 @@
import org.apache.hadoop.ha.HealthCheckFailedException;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.FsServerDefaults;
-import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;
-import org.apache.hadoop.fs.UnresolvedLinkException;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.permission.PermissionStatus;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
-import static org.apache.hadoop.hdfs.protocol.FSConstants.MAX_PATH_LENGTH;
-import static org.apache.hadoop.hdfs.protocol.FSConstants.MAX_PATH_DEPTH;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
-import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
-import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.ha.ActiveState;
import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
import org.apache.hadoop.hdfs.server.namenode.ha.StandbyState;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
-import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
-import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.NodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
-import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-import org.apache.hadoop.io.EnumSetWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.net.Node;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
-import org.apache.hadoop.security.token.SecretManager.InvalidToken;
-import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
import org.apache.hadoop.util.ServicePlugin;
import org.apache.hadoop.util.StringUtils;
@@ -152,7 +105,7 @@
* NameNode state, for example partial blocksMap etc.
**********************************************************/
@InterfaceAudience.Private
-public class NameNode implements NamenodeProtocols {
+public class NameNode {
static{
HdfsConfiguration.init();
}
@@ -219,12 +172,6 @@ public long getProtocolVersion(String protocol,
}
- @Override // VersionedProtocol
- public ProtocolSignature getProtocolSignature(String protocol,
- long clientVersion, int clientMethodsHash) throws IOException {
- return ProtocolSignature.getProtocolSignature(
- this, protocol, clientVersion, clientMethodsHash);
- }
public static final int DEFAULT_PORT = 8020;
@@ -239,18 +186,6 @@ public ProtocolSignature getProtocolSignature(String protocol,
private final boolean haEnabled;
- /** RPC server. Package-protected for use in tests. */
- Server server;
- /** RPC server for HDFS Services communication.
- BackupNode, Datanodes and all other services
- should be connecting to this server if it is
- configured. Clients should only go to NameNode#server
- */
- protected Server serviceRpcServer;
- /** RPC server address */
- protected InetSocketAddress rpcAddress = null;
- /** RPC server for DN address */
- protected InetSocketAddress serviceRPCAddress = null;
/** httpServer */
protected NameNodeHttpServer httpServer;
private Thread emptier;
@@ -258,11 +193,11 @@ public ProtocolSignature getProtocolSignature(String protocol,
protected boolean stopRequested = false;
/** Registration information of this name-node */
protected NamenodeRegistration nodeRegistration;
- /** Is service level authorization enabled? */
- private boolean serviceAuthEnabled = false;
/** Activated plug-ins. */
private List<ServicePlugin> plugins;
+ private NameNodeRpcServer rpcServer;
+
/** Format a new filesystem. Destroys any filesystem that may already
* exist at this location. **/
public static void format(Configuration conf) throws IOException {
@@ -278,6 +213,10 @@ public FSNamesystem getNamesystem() {
return namesystem;
}
+ public NamenodeProtocols getRpcServer() {
+ return rpcServer;
+ }
+
static void initMetrics(Configuration conf, NamenodeRole role) {
metrics = NameNodeMetrics.create(conf, role);
}
@@ -327,19 +266,19 @@ public static InetSocketAddress getAddress(Configuration conf) {
* @param filesystemURI
* @return address of file system
*/
- public static InetSocketAddress getAddress(URI filesystemURI) {
+ static InetSocketAddress getAddress(URI filesystemURI) {
String authority = filesystemURI.getAuthority();
if (authority == null) {
throw new IllegalArgumentException(String.format(
"Invalid URI for NameNode address (check %s): %s has no authority.",
FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString()));
}
- if (!FSConstants.HDFS_URI_SCHEME.equalsIgnoreCase(
+ if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(
filesystemURI.getScheme())) {
throw new IllegalArgumentException(String.format(
"Invalid URI for NameNode address (check %s): %s is not of scheme '%s'.",
FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString(),
- FSConstants.HDFS_URI_SCHEME));
+ HdfsConstants.HDFS_URI_SCHEME));
}
return getAddress(authority);
}
@@ -347,7 +286,7 @@ public static InetSocketAddress getAddress(URI filesystemURI) {
public static URI getUri(InetSocketAddress namenode) {
int port = namenode.getPort();
String portString = port == DEFAULT_PORT ? "" : (":"+port);
- return URI.create(FSConstants.HDFS_URI_SCHEME + "://"
+ return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
+ namenode.getHostName()+portString);
}
@@ -385,11 +324,13 @@ protected InetSocketAddress getRpcServerAddress(Configuration conf) throws IOExc
/**
* Modifies the configuration passed to contain the service rpc address setting
*/
- protected void setRpcServiceServerAddress(Configuration conf) {
+ protected void setRpcServiceServerAddress(Configuration conf,
+ InetSocketAddress serviceRPCAddress) {
setServiceAddress(conf, getHostPortString(serviceRPCAddress));
}
- protected void setRpcServerAddress(Configuration conf) {
+ protected void setRpcServerAddress(Configuration conf,
+ InetSocketAddress rpcAddress) {
FileSystem.setDefaultUri(conf, getUri(rpcAddress));
}
@@ -404,7 +345,7 @@ protected void setHttpServerAddress(Configuration conf) {
}
protected void loadNamesystem(Configuration conf) throws IOException {
- this.namesystem = new FSNamesystem(conf);
+ this.namesystem = FSNamesystem.loadFromDisk(conf);
}
NamenodeRegistration getRegistration() {
@@ -413,7 +354,7 @@ NamenodeRegistration getRegistration() {
NamenodeRegistration setRegistration() {
nodeRegistration = new NamenodeRegistration(
- getHostPortString(rpcAddress),
+ getHostPortString(rpcServer.getRpcAddress()),
getHostPortString(getHttpAddress()),
getFSImage().getStorage(), getRole());
return nodeRegistration;
@@ -435,45 +376,13 @@ void loginAsNameNodeUser(Configuration conf) throws IOException {
*/
protected void initialize(Configuration conf) throws IOException {
initializeGenericKeys(conf);
- InetSocketAddress socAddr = getRpcServerAddress(conf);
UserGroupInformation.setConfiguration(conf);
loginAsNameNodeUser(conf);
- int handlerCount =
- conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY,
- DFS_DATANODE_HANDLER_COUNT_DEFAULT);
NameNode.initMetrics(conf, this.getRole());
loadNamesystem(conf);
- // create rpc server
- InetSocketAddress dnSocketAddr = getServiceRpcServerAddress(conf);
- if (dnSocketAddr != null) {
- int serviceHandlerCount =
- conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
- DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
- this.serviceRpcServer = RPC.getServer(NamenodeProtocols.class, this,
- dnSocketAddr.getHostName(), dnSocketAddr.getPort(), serviceHandlerCount,
- false, conf, namesystem.getDelegationTokenSecretManager());
- this.serviceRPCAddress = this.serviceRpcServer.getListenerAddress();
- setRpcServiceServerAddress(conf);
- }
- this.server = RPC.getServer(NamenodeProtocols.class, this,
- socAddr.getHostName(), socAddr.getPort(),
- handlerCount, false, conf,
- namesystem.getDelegationTokenSecretManager());
- // set service-level authorization security policy
- if (serviceAuthEnabled =
- conf.getBoolean(
- CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
- this.server.refreshServiceAcl(conf, new HDFSPolicyProvider());
- if (this.serviceRpcServer != null) {
- this.serviceRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
- }
- }
-
- // The rpc-server port can be ephemeral... ensure we have the correct info
- this.rpcAddress = this.server.getListenerAddress();
- setRpcServerAddress(conf);
+ rpcServer = createRpcServer(conf);
try {
validateConfigurationSettings(conf);
@@ -485,6 +394,15 @@ protected void initialize(Configuration conf) throws IOException {
activate(conf);
}
+ /**
+ * Create the RPC server implementation. Used as an extension point for the
+ * BackupNode.
+ */
+ protected NameNodeRpcServer createRpcServer(Configuration conf)
+ throws IOException {
+ return new NameNodeRpcServer(conf, this);
+ }
+
/**
* Verifies that the final Configuration Settings look ok for the NameNode to
* properly start up
@@ -517,10 +435,7 @@ void activate(Configuration conf) throws IOException {
}
namesystem.activate(conf);
startHttpServer(conf);
- server.start(); //start RPC server
- if (serviceRpcServer != null) {
- serviceRpcServer.start();
- }
+ rpcServer.start();
startTrashEmptier(conf);
plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY,
@@ -532,9 +447,10 @@ void activate(Configuration conf) throws IOException {
LOG.warn("ServicePlugin " + p + " could not be started", t);
}
}
- LOG.info(getRole() + " up at: " + rpcAddress);
- if (serviceRPCAddress != null) {
- LOG.info(getRole() + " service server is up at: " + serviceRPCAddress);
+
+ LOG.info(getRole() + " up at: " + rpcServer.getRpcAddress());
+ if (rpcServer.getServiceRpcAddress() != null) {
+ LOG.info(getRole() + " service server is up at: " + rpcServer.getServiceRpcAddress());
}
}
@@ -605,7 +521,7 @@ protected NameNode(Configuration conf, NamenodeRole role)
*/
public void join() {
try {
- this.server.join();
+ this.rpcServer.join();
} catch (InterruptedException ie) {
}
}
@@ -635,8 +551,7 @@ public void stop() {
}
if(namesystem != null) namesystem.close();
if(emptier != null) emptier.interrupt();
- if(server != null) server.stop();
- if(serviceRpcServer != null) serviceRpcServer.stop();
+ if(rpcServer != null) rpcServer.stop();
if (metrics != null) {
metrics.shutdown();
}
@@ -649,440 +564,6 @@ synchronized boolean isStopRequested() {
return stopRequested;
}
- /////////////////////////////////////////////////////
- // NamenodeProtocol
- /////////////////////////////////////////////////////
- @Override // NamenodeProtocol
- public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
- throws IOException {
- if(size <= 0) {
- throw new IllegalArgumentException(
- "Unexpected not positive size: "+size);
- }
-
- return namesystem.getBlockManager().getBlocks(datanode, size);
- }
-
- @Override // NamenodeProtocol
- public ExportedBlockKeys getBlockKeys() throws IOException {
- return namesystem.getBlockManager().getBlockKeys();
- }
-
- @Override // NamenodeProtocol
- public void errorReport(NamenodeRegistration registration,
- int errorCode,
- String msg) throws IOException {
- checkOperation(OperationCategory.WRITE);
- verifyRequest(registration);
- LOG.info("Error report from " + registration + ": " + msg);
- if(errorCode == FATAL)
- namesystem.releaseBackupNode(registration);
- }
-
- @Override // NamenodeProtocol
- public NamenodeRegistration register(NamenodeRegistration registration)
- throws IOException {
- verifyVersion(registration.getVersion());
- NamenodeRegistration myRegistration = setRegistration();
- namesystem.registerBackupNode(registration, myRegistration);
- return myRegistration;
- }
-
- @Override // NamenodeProtocol
- public NamenodeCommand startCheckpoint(NamenodeRegistration registration)
- throws IOException {
- verifyRequest(registration);
- if(!isRole(NamenodeRole.NAMENODE))
- throw new IOException("Only an ACTIVE node can invoke startCheckpoint.");
- return namesystem.startCheckpoint(registration, setRegistration());
- }
-
- @Override // NamenodeProtocol
- public void endCheckpoint(NamenodeRegistration registration,
- CheckpointSignature sig) throws IOException {
- checkOperation(OperationCategory.CHECKPOINT);
- namesystem.endCheckpoint(registration, sig);
- }
-
- @Override // ClientProtocol
- public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
- throws IOException {
- checkOperation(OperationCategory.WRITE);
- return namesystem.getDelegationToken(renewer);
- }
-
- @Override // ClientProtocol
- public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
- throws InvalidToken, IOException {
- checkOperation(OperationCategory.WRITE);
- return namesystem.renewDelegationToken(token);
- }
-
- @Override // ClientProtocol
- public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
- throws IOException {
- checkOperation(OperationCategory.WRITE);
- namesystem.cancelDelegationToken(token);
- }
-
- @Override // ClientProtocol
- public LocatedBlocks getBlockLocations(String src,
- long offset,
- long length)
- throws IOException {
- checkOperation(OperationCategory.READ);
- metrics.incrGetBlockLocations();
- return namesystem.getBlockLocations(getClientMachine(),
- src, offset, length);
- }
-
- @Override // ClientProtocol
- public FsServerDefaults getServerDefaults() throws IOException {
- return namesystem.getServerDefaults();
- }
-
- @Override // ClientProtocol
- public void create(String src,
- FsPermission masked,
- String clientName,
- EnumSetWritable<CreateFlag> flag,
- boolean createParent,
- short replication,
- long blockSize) throws IOException {
- checkOperation(OperationCategory.WRITE);
- String clientMachine = getClientMachine();
- if (stateChangeLog.isDebugEnabled()) {
- stateChangeLog.debug("*DIR* NameNode.create: file "
- +src+" for "+clientName+" at "+clientMachine);
- }
- if (!checkPathLength(src)) {
- throw new IOException("create: Pathname too long. Limit "
- + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
- }
- namesystem.startFile(src,
- new PermissionStatus(UserGroupInformation.getCurrentUser().getShortUserName(),
- null, masked),
- clientName, clientMachine, flag.get(), createParent, replication, blockSize);
- metrics.incrFilesCreated();
- metrics.incrCreateFileOps();
- }
-
- @Override // ClientProtocol
- public LocatedBlock append(String src, String clientName)
- throws IOException {
- checkOperation(OperationCategory.WRITE);
- String clientMachine = getClientMachine();
- if (stateChangeLog.isDebugEnabled()) {
- stateChangeLog.debug("*DIR* NameNode.append: file "
- +src+" for "+clientName+" at "+clientMachine);
- }
- LocatedBlock info = namesystem.appendFile(src, clientName, clientMachine);
- metrics.incrFilesAppended();
- return info;
- }
-
- @Override // ClientProtocol
- public boolean recoverLease(String src, String clientName) throws IOException {
- checkOperation(OperationCategory.WRITE);
- String clientMachine = getClientMachine();
- return namesystem.recoverLease(src, clientName, clientMachine);
- }
-
- @Override // ClientProtocol
- public boolean setReplication(String src, short replication)
- throws IOException {
- checkOperation(OperationCategory.WRITE);
- return namesystem.setReplication(src, replication);
- }
-
- @Override // ClientProtocol
- public void setPermission(String src, FsPermission permissions)
- throws IOException {
- checkOperation(OperationCategory.WRITE);
- namesystem.setPermission(src, permissions);
- }
-
- @Override // ClientProtocol
- public void setOwner(String src, String username, String groupname)
- throws IOException {
- checkOperation(OperationCategory.WRITE);
- namesystem.setOwner(src, username, groupname);
- }
-
- @Override // ClientProtocol
- public LocatedBlock addBlock(String src,
- String clientName,
- ExtendedBlock previous,
- DatanodeInfo[] excludedNodes)
- throws IOException {
- checkOperation(OperationCategory.WRITE);
- if(stateChangeLog.isDebugEnabled()) {
- stateChangeLog.debug("*BLOCK* NameNode.addBlock: file "
- +src+" for "+clientName);
- }
- HashMap<Node, Node> excludedNodesSet = null;
- if (excludedNodes != null) {
- excludedNodesSet = new HashMap<Node, Node>(excludedNodes.length);
- for (Node node:excludedNodes) {
- excludedNodesSet.put(node, node);
- }
- }
- LocatedBlock locatedBlock =
- namesystem.getAdditionalBlock(src, clientName, previous, excludedNodesSet);
- if (locatedBlock != null)
- metrics.incrAddBlockOps();
- return locatedBlock;
- }
-
- @Override // ClientProtocol
- public LocatedBlock getAdditionalDatanode(final String src, final ExtendedBlock blk,
- final DatanodeInfo[] existings, final DatanodeInfo[] excludes,
- final int numAdditionalNodes, final String clientName
- ) throws IOException {
- checkOperation(OperationCategory.WRITE);
- if (LOG.isDebugEnabled()) {
- LOG.debug("getAdditionalDatanode: src=" + src
- + ", blk=" + blk
- + ", existings=" + Arrays.asList(existings)
- + ", excludes=" + Arrays.asList(excludes)
- + ", numAdditionalNodes=" + numAdditionalNodes
- + ", clientName=" + clientName);
- }
-
- metrics.incrGetAdditionalDatanodeOps();
-
- HashMap<Node, Node> excludeSet = null;
- if (excludes != null) {
- excludeSet = new HashMap<Node, Node>(excludes.length);
- for (Node node : excludes) {
- excludeSet.put(node, node);
- }
- }
- return namesystem.getAdditionalDatanode(src, blk,
- existings, excludeSet, numAdditionalNodes, clientName);
- }
-
- /**
- * The client needs to give up on the block.
- */
- @Override // ClientProtocol
- public void abandonBlock(ExtendedBlock b, String src, String holder)
- throws IOException {
- checkOperation(OperationCategory.WRITE);
- if(stateChangeLog.isDebugEnabled()) {
- stateChangeLog.debug("*BLOCK* NameNode.abandonBlock: "
- +b+" of file "+src);
- }
- if (!namesystem.abandonBlock(b, src, holder)) {
- throw new IOException("Cannot abandon block during write to " + src);
- }
- }
-
- @Override // ClientProtocol
- public boolean complete(String src, String clientName, ExtendedBlock last)
- throws IOException {
- checkOperation(OperationCategory.WRITE);
- if(stateChangeLog.isDebugEnabled()) {
- stateChangeLog.debug("*DIR* NameNode.complete: "
- + src + " for " + clientName);
- }
- return namesystem.completeFile(src, clientName, last);
- }
-
- /**
- * The client has detected an error on the specified located blocks
- * and is reporting them to the server. For now, the namenode will
- * mark the block as corrupt. In the future we might
- * check the blocks are actually corrupt.
- */
- @Override // ClientProtocol, DatanodeProtocol
- public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
- checkOperation(OperationCategory.WRITE);
- stateChangeLog.info("*DIR* NameNode.reportBadBlocks");
- for (int i = 0; i < blocks.length; i++) {
- ExtendedBlock blk = blocks[i].getBlock();
- DatanodeInfo[] nodes = blocks[i].getLocations();
- for (int j = 0; j < nodes.length; j++) {
- DatanodeInfo dn = nodes[j];
- namesystem.getBlockManager().findAndMarkBlockAsCorrupt(blk, dn);
- }
- }
- }
-
- @Override // ClientProtocol
- public LocatedBlock updateBlockForPipeline(ExtendedBlock block, String clientName)
- throws IOException {
- checkOperation(OperationCategory.WRITE);
- return namesystem.updateBlockForPipeline(block, clientName);
- }
-
-
- @Override // ClientProtocol
- public void updatePipeline(String clientName, ExtendedBlock oldBlock,
- ExtendedBlock newBlock, DatanodeID[] newNodes)
- throws IOException {
- checkOperation(OperationCategory.WRITE);
- namesystem.updatePipeline(clientName, oldBlock, newBlock, newNodes);
- }
-
- @Override // DatanodeProtocol
- public void commitBlockSynchronization(ExtendedBlock block,
- long newgenerationstamp, long newlength,
- boolean closeFile, boolean deleteblock, DatanodeID[] newtargets)
- throws IOException {
- checkOperation(OperationCategory.WRITE);
- namesystem.commitBlockSynchronization(block,
- newgenerationstamp, newlength, closeFile, deleteblock, newtargets);
- }
-
- @Override // ClientProtocol
- public long getPreferredBlockSize(String filename)
- throws IOException {
- checkOperation(OperationCategory.READ);
- return namesystem.getPreferredBlockSize(filename);
- }
-
- @Deprecated
- @Override // ClientProtocol
- public boolean rename(String src, String dst) throws IOException {
- checkOperation(OperationCategory.WRITE);
- if(stateChangeLog.isDebugEnabled()) {
- stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst);
- }
- if (!checkPathLength(dst)) {
- throw new IOException("rename: Pathname too long. Limit "
- + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
- }
- boolean ret = namesystem.renameTo(src, dst);
- if (ret) {
- metrics.incrFilesRenamed();
- }
- return ret;
- }
-
- @Override // ClientProtocol
- public void concat(String trg, String[] src) throws IOException {
- checkOperation(OperationCategory.WRITE);
- namesystem.concat(trg, src);
- }
-
- @Override // ClientProtocol
- public void rename(String src, String dst, Options.Rename... options)
- throws IOException {
- checkOperation(OperationCategory.WRITE);
- if(stateChangeLog.isDebugEnabled()) {
- stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst);
- }
- if (!checkPathLength(dst)) {
- throw new IOException("rename: Pathname too long. Limit "
- + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
- }
- namesystem.renameTo(src, dst, options);
- metrics.incrFilesRenamed();
- }
-
- @Deprecated
- @Override // ClientProtocol
- public boolean delete(String src) throws IOException {
- checkOperation(OperationCategory.WRITE);
- return delete(src, true);
- }
-
- @Override // ClientProtocol
- public boolean delete(String src, boolean recursive) throws IOException {
- checkOperation(OperationCategory.WRITE);
- if (stateChangeLog.isDebugEnabled()) {
- stateChangeLog.debug("*DIR* Namenode.delete: src=" + src
- + ", recursive=" + recursive);
- }
- boolean ret = namesystem.delete(src, recursive);
- if (ret)
- metrics.incrDeleteFileOps();
- return ret;
- }
-
- /**
- * Check path length does not exceed maximum. Returns true if
- * length and depth are okay. Returns false if length is too long
- * or depth is too great.
- */
- private boolean checkPathLength(String src) {
- Path srcPath = new Path(src);
- return (src.length() <= MAX_PATH_LENGTH &&
- srcPath.depth() <= MAX_PATH_DEPTH);
- }
-
- @Override // ClientProtocol
- public boolean mkdirs(String src, FsPermission masked, boolean createParent)
- throws IOException {
- checkOperation(OperationCategory.WRITE);
- if(stateChangeLog.isDebugEnabled()) {
- stateChangeLog.debug("*DIR* NameNode.mkdirs: " + src);
- }
- if (!checkPathLength(src)) {
- throw new IOException("mkdirs: Pathname too long. Limit "
- + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
- }
- return namesystem.mkdirs(src,
- new PermissionStatus(UserGroupInformation.getCurrentUser().getShortUserName(),
- null, masked), createParent);
- }
-
- @Override // ClientProtocol
- public void renewLease(String clientName) throws IOException {
- checkOperation(OperationCategory.WRITE);
- namesystem.renewLease(clientName);
- }
-
- @Override // ClientProtocol
- public DirectoryListing getListing(String src, byte[] startAfter,
- boolean needLocation) throws IOException {
- checkOperation(OperationCategory.READ);
- DirectoryListing files = namesystem.getListing(
- src, startAfter, needLocation);
- if (files != null) {
- metrics.incrGetListingOps();
- metrics.incrFilesInGetListingOps(files.getPartialListing().length);
- }
- return files;
- }
-
- @Override // ClientProtocol
- public HdfsFileStatus getFileInfo(String src) throws IOException {
- checkOperation(OperationCategory.READ);
- metrics.incrFileInfoOps();
- return namesystem.getFileInfo(src, true);
- }
-
- @Override // ClientProtocol
- public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
- checkOperation(OperationCategory.READ);
- metrics.incrFileInfoOps();
- return namesystem.getFileInfo(src, false);
- }
-
- @Override
- public long[] getStats() {
- return namesystem.getStats();
- }
-
- @Override // ClientProtocol
- public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
- throws IOException {
- checkOperation(OperationCategory.READ);
- DatanodeInfo results[] = namesystem.datanodeReport(type);
- if (results == null ) {
- throw new IOException("Cannot find datanode report");
- }
- return results;
- }
-
- @Override // ClientProtocol
- public boolean setSafeMode(SafeModeAction action) throws IOException {
- // TODO:HA decide on OperationCategory for this
- return namesystem.setSafeMode(action);
- }
-
/**
* Is the cluster currently in safe mode?
*/
@@ -1090,275 +571,8 @@ public boolean isInSafeMode() {
return namesystem.isInSafeMode();
}
- @Override // ClientProtocol
- public boolean restoreFailedStorage(String arg)
- throws AccessControlException {
- // TODO:HA decide on OperationCategory for this
- return namesystem.restoreFailedStorage(arg);
- }
-
- @Override // ClientProtocol
- public void saveNamespace() throws IOException {
- // TODO:HA decide on OperationCategory for this
- namesystem.saveNamespace();
- }
-
- @Override // ClientProtocol
- public void refreshNodes() throws IOException {
- // TODO:HA decide on OperationCategory for this
- namesystem.getBlockManager().getDatanodeManager().refreshNodes(
- new HdfsConfiguration());
- }
-
- @Override // NamenodeProtocol
- public long getTransactionID() {
- // TODO:HA decide on OperationCategory for this
- return namesystem.getEditLog().getSyncTxId();
- }
-
- @Override // NamenodeProtocol
- public CheckpointSignature rollEditLog() throws IOException {
- // TODO:HA decide on OperationCategory for this
- return namesystem.rollEditLog();
- }
-
- @Override // NamenodeProtocol
- public RemoteEditLogManifest getEditLogManifest(long sinceTxId)
- throws IOException {
- // TODO:HA decide on OperationCategory for this
- return namesystem.getEditLog().getEditLogManifest(sinceTxId);
- }
-
- @Override // ClientProtocol
- public void finalizeUpgrade() throws IOException {
- // TODO:HA decide on OperationCategory for this
- namesystem.finalizeUpgrade();
- }
-
- @Override // ClientProtocol
- public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
- throws IOException {
- // TODO:HA decide on OperationCategory for this
- return namesystem.distributedUpgradeProgress(action);
- }
-
- @Override // ClientProtocol
- public void metaSave(String filename) throws IOException {
- // TODO:HA decide on OperationCategory for this
- namesystem.metaSave(filename);
- }
-
- @Override // ClientProtocol
- public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
- throws IOException {
- checkOperation(OperationCategory.READ);
- Collection<FSNamesystem.CorruptFileBlockInfo> fbs =
- namesystem.listCorruptFileBlocks(path, cookie);
-
- String[] files = new String[fbs.size()];
- String lastCookie = "";
- int i = 0;
- for(FSNamesystem.CorruptFileBlockInfo fb: fbs) {
- files[i++] = fb.path;
- lastCookie = fb.block.getBlockName();
- }
- return new CorruptFileBlocks(files, lastCookie);
- }
-
- /**
- * Tell all datanodes to use a new, non-persistent bandwidth value for
- * dfs.datanode.balance.bandwidthPerSec.
- * @param bandwidth Blanacer bandwidth in bytes per second for all datanodes.
- * @throws IOException
- */
- @Override // ClientProtocol
- public void setBalancerBandwidth(long bandwidth) throws IOException {
- // TODO:HA decide on OperationCategory for this
- namesystem.getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
- }
-
- @Override // ClientProtocol
- public ContentSummary getContentSummary(String path) throws IOException {
- checkOperation(OperationCategory.READ);
- return namesystem.getContentSummary(path);
- }
-
- @Override // ClientProtocol
- public void setQuota(String path, long namespaceQuota, long diskspaceQuota)
- throws IOException {
- checkOperation(OperationCategory.WRITE);
- namesystem.setQuota(path, namespaceQuota, diskspaceQuota);
- }
-
- @Override // ClientProtocol
- public void fsync(String src, String clientName) throws IOException {
- checkOperation(OperationCategory.WRITE);
- namesystem.fsync(src, clientName);
- }
-
- @Override // ClientProtocol
- public void setTimes(String src, long mtime, long atime)
- throws IOException {
- checkOperation(OperationCategory.WRITE);
- namesystem.setTimes(src, mtime, atime);
- }
-
- @Override // ClientProtocol
- public void createSymlink(String target, String link, FsPermission dirPerms,
- boolean createParent) throws IOException {
- checkOperation(OperationCategory.WRITE);
- metrics.incrCreateSymlinkOps();
- /* We enforce the MAX_PATH_LENGTH limit even though a symlink target
- * URI may refer to a non-HDFS file system.
- */
- if (!checkPathLength(link)) {
- throw new IOException("Symlink path exceeds " + MAX_PATH_LENGTH +
- " character limit");
-
- }
- if ("".equals(target)) {
- throw new IOException("Invalid symlink target");
- }
- final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
- namesystem.createSymlink(target, link,
- new PermissionStatus(ugi.getShortUserName(), null, dirPerms), createParent);
- }
-
- @Override // ClientProtocol
- public String getLinkTarget(String path) throws IOException {
- checkOperation(OperationCategory.READ);
- metrics.incrGetLinkTargetOps();
- /* Resolves the first symlink in the given path, returning a
- * new path consisting of the target of the symlink and any
- * remaining path components from the original path.
- */
- try {
- HdfsFileStatus stat = namesystem.getFileInfo(path, false);
- if (stat != null) {
- // NB: getSymlink throws IOException if !stat.isSymlink()
- return stat.getSymlink();
- }
- } catch (UnresolvedPathException e) {
- return e.getResolvedPath().toString();
- } catch (UnresolvedLinkException e) {
- // The NameNode should only throw an UnresolvedPathException
- throw new AssertionError("UnresolvedLinkException thrown");
- }
- return null;
- }
-
-
- @Override // DatanodeProtocol
- public DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg)
- throws IOException {
- verifyVersion(nodeReg.getVersion());
- namesystem.registerDatanode(nodeReg);
-
- return nodeReg;
- }
-
- @Override // DatanodeProtocol
- public DatanodeCommand[] sendHeartbeat(DatanodeRegistration nodeReg,
- long capacity, long dfsUsed, long remaining, long blockPoolUsed,
- int xmitsInProgress, int xceiverCount, int failedVolumes)
- throws IOException {
- verifyRequest(nodeReg);
- return namesystem.handleHeartbeat(nodeReg, capacity, dfsUsed, remaining,
- blockPoolUsed, xceiverCount, xmitsInProgress, failedVolumes);
- }
-
- @Override // DatanodeProtocol
- public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
- String poolId, long[] blocks) throws IOException {
- verifyRequest(nodeReg);
- BlockListAsLongs blist = new BlockListAsLongs(blocks);
- if(stateChangeLog.isDebugEnabled()) {
- stateChangeLog.debug("*BLOCK* NameNode.blockReport: "
- + "from " + nodeReg.getName() + " " + blist.getNumberOfBlocks()
- + " blocks");
- }
-
- namesystem.getBlockManager().processReport(nodeReg, poolId, blist);
- if (getFSImage().isUpgradeFinalized())
- return new DatanodeCommand.Finalize(poolId);
- return null;
- }
-
- @Override // DatanodeProtocol
- public void blockReceivedAndDeleted(DatanodeRegistration nodeReg, String poolId,
- ReceivedDeletedBlockInfo[] receivedAndDeletedBlocks) throws IOException {
- verifyRequest(nodeReg);
- if(stateChangeLog.isDebugEnabled()) {
- stateChangeLog.debug("*BLOCK* NameNode.blockReceivedAndDeleted: "
- +"from "+nodeReg.getName()+" "+receivedAndDeletedBlocks.length
- +" blocks.");
- }
- namesystem.getBlockManager().blockReceivedAndDeleted(
- nodeReg, poolId, receivedAndDeletedBlocks);
- }
-
- @Override // DatanodeProtocol
- public void errorReport(DatanodeRegistration nodeReg,
- int errorCode, String msg) throws IOException {
- String dnName = (nodeReg == null ? "unknown DataNode" : nodeReg.getName());
-
- if (errorCode == DatanodeProtocol.NOTIFY) {
- LOG.info("Error report from " + dnName + ": " + msg);
- return;
- }
- verifyRequest(nodeReg);
-
- if (errorCode == DatanodeProtocol.DISK_ERROR) {
- LOG.warn("Disk error on " + dnName + ": " + msg);
- } else if (errorCode == DatanodeProtocol.FATAL_DISK_ERROR) {
- LOG.warn("Fatal disk error on " + dnName + ": " + msg);
- namesystem.getBlockManager().getDatanodeManager().removeDatanode(nodeReg);
- } else {
- LOG.info("Error report from " + dnName + ": " + msg);
- }
- }
-
- @Override // DatanodeProtocol, NamenodeProtocol
- public NamespaceInfo versionRequest() throws IOException {
- return namesystem.getNamespaceInfo();
- }
-
- @Override // DatanodeProtocol
- public UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOException {
- return namesystem.processDistributedUpgradeCommand(comm);
- }
-
- /**
- * Verify request.
- *
- * Verifies correctness of the datanode version, registration ID, and
- * if the datanode does not need to be shutdown.
- *
- * @param nodeReg data node registration
- * @throws IOException
- */
- public void verifyRequest(NodeRegistration nodeReg) throws IOException {
- verifyVersion(nodeReg.getVersion());
- if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID())) {
- LOG.warn("Invalid registrationID - expected: "
- + namesystem.getRegistrationID() + " received: "
- + nodeReg.getRegistrationID());
- throw new UnregisteredNodeException(nodeReg);
- }
- }
-
- /**
- * Verify version.
- *
- * @param version
- * @throws IOException
- */
- public void verifyVersion(int version) throws IOException {
- if (version != FSConstants.LAYOUT_VERSION)
- throw new IncorrectVersionException(version, "data node");
- }
-
- public FSImage getFSImage() {
+ /** get FSImage */
+ FSImage getFSImage() {
return namesystem.dir.fsImage;
}
@@ -1367,7 +581,7 @@ public FSImage getFSImage() {
* @return namenode rpc address
*/
public InetSocketAddress getNameNodeAddress() {
- return rpcAddress;
+ return rpcServer.getRpcAddress();
}
/**
@@ -1376,7 +590,7 @@ public InetSocketAddress getNameNodeAddress() {
* @return namenode service rpc address used by datanodes
*/
public InetSocketAddress getServiceRpcAddress() {
- return serviceRPCAddress != null ? serviceRPCAddress : rpcAddress;
+ return rpcServer.getServiceRpcAddress() != null ? rpcServer.getServiceRpcAddress() : rpcServer.getRpcAddress();
}
/**
@@ -1437,16 +651,16 @@ private static boolean format(Configuration conf,
}
System.out.println("Formatting using clusterid: " + clusterId);
- FSImage fsImage = new FSImage(conf, null, dirsToFormat, editDirsToFormat);
- FSNamesystem nsys = new FSNamesystem(fsImage, conf);
- nsys.dir.fsImage.format(clusterId);
+ FSImage fsImage = new FSImage(conf, dirsToFormat, editDirsToFormat);
+ FSNamesystem fsn = new FSNamesystem(conf, fsImage);
+ fsImage.format(fsn, clusterId);
return false;
}
private static boolean finalize(Configuration conf,
boolean isConfirmationNeeded
) throws IOException {
- FSNamesystem nsys = new FSNamesystem(new FSImage(conf), conf);
+ FSNamesystem nsys = new FSNamesystem(conf, new FSImage(conf));
System.err.print(
"\"finalize\" will remove the previous state of the files system.\n"
+ "Recent upgrade will become permanent.\n"
@@ -1461,40 +675,6 @@ private static boolean finalize(Configuration conf,
return false;
}
- @Override // RefreshAuthorizationPolicyProtocol
- public void refreshServiceAcl() throws IOException {
- if (!serviceAuthEnabled) {
- throw new AuthorizationException("Service Level Authorization not enabled!");
- }
-
- this.server.refreshServiceAcl(new Configuration(), new HDFSPolicyProvider());
- if (this.serviceRpcServer != null) {
- this.serviceRpcServer.refreshServiceAcl(new Configuration(), new HDFSPolicyProvider());
- }
- }
-
- @Override // RefreshAuthorizationPolicyProtocol
- public void refreshUserToGroupsMappings() throws IOException {
- LOG.info("Refreshing all user-to-groups mappings. Requested by user: " +
- UserGroupInformation.getCurrentUser().getShortUserName());
- Groups.getUserToGroupsMappingService().refresh();
- }
-
- @Override // RefreshAuthorizationPolicyProtocol
- public void refreshSuperUserGroupsConfiguration() {
- LOG.info("Refreshing SuperUser proxy group mapping list ");
-
- ProxyUsers.refreshSuperUserGroupsConfiguration();
- }
-
- @Override // GetUserMappingsProtocol
- public String[] getGroupsForUser(String user) throws IOException {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Getting groups for user " + user);
- }
- return UserGroupInformation.createRemoteUser(user).getGroupNames();
- }
-
private static void printUsage() {
System.err.println(
"Usage: java NameNode [" +
@@ -1647,7 +827,7 @@ public static void initializeGenericKeys(Configuration conf) {
DFSUtil.setGenericConf(conf, nameserviceId, NAMESERVICE_SPECIFIC_KEYS);
if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
- URI defaultUri = URI.create(FSConstants.HDFS_URI_SCHEME + "://"
+ URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
+ conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
}
@@ -1666,17 +846,8 @@ public static void main(String argv[]) throws Exception {
System.exit(-1);
}
}
-
- private static String getClientMachine() {
- String clientMachine = Server.getRemoteAddress();
- if (clientMachine == null) {
- clientMachine = "";
- }
- return clientMachine;
- }
-
- @Override // HAServiceProtocol
- public synchronized void monitorHealth() throws HealthCheckFailedException {
+
+ synchronized void monitorHealth() throws HealthCheckFailedException {
if (!haEnabled) {
return; // no-op, if HA is not eanbled
}
@@ -1684,16 +855,14 @@ public synchronized void monitorHealth() throws HealthCheckFailedException {
return;
}
- @Override // HAServiceProtocol
- public synchronized void transitionToActive() throws ServiceFailedException {
+ synchronized void transitionToActive() throws ServiceFailedException {
if (!haEnabled) {
throw new ServiceFailedException("HA for namenode is not enabled");
}
state.setState(this, ACTIVE_STATE);
}
- @Override // HAServiceProtocol
- public synchronized void transitionToStandby() throws ServiceFailedException {
+ synchronized void transitionToStandby() throws ServiceFailedException {
if (!haEnabled) {
throw new ServiceFailedException("HA for namenode is not enabled");
}
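/*
 * Editor's note: a hedged sketch (not part of the patch) of the refactoring
 * pattern applied above: NameNode no longer implements the RPC protocols
 * itself; it builds a separate RPC-server object through a protected factory
 * method (createRpcServer) so a subclass such as the BackupNode can substitute
 * its own, and it only delegates lifecycle calls (start/stop) and exposes the
 * server through an accessor. The Mini* classes below are illustrative
 * stand-ins, not Hadoop APIs.
 */
class RpcServerFactorySketch {

  static class MiniRpcServer {
    void start() { System.out.println("rpc server started"); }
    void stop()  { System.out.println("rpc server stopped"); }
  }

  static class MiniNameNode {
    private MiniRpcServer rpcServer;

    /** Extension point: subclasses may return a specialized server. */
    protected MiniRpcServer createRpcServer() {
      return new MiniRpcServer();
    }

    void initialize() {
      rpcServer = createRpcServer();   // built once during initialization
      rpcServer.start();
    }

    void stop() {
      if (rpcServer != null) {
        rpcServer.stop();
      }
    }

    /** Other components reach protocol methods through this accessor. */
    MiniRpcServer getRpcServer() {
      return rpcServer;
    }
  }

  /** e.g. a backup-node variant swapping in its own RPC server. */
  static class MiniBackupNode extends MiniNameNode {
    @Override
    protected MiniRpcServer createRpcServer() {
      return new MiniRpcServer();      // would return a backup-specific server
    }
  }

  public static void main(String[] args) {
    MiniNameNode nn = new MiniBackupNode();
    nn.initialize();
    nn.stop();
  }
}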
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
index a15cdecabc..24f999e170 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
@@ -62,7 +62,7 @@ public NameNodeResourceChecker(Configuration conf) throws IOException {
duReserved = conf.getLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY,
DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_DEFAULT);
-
+
Collection<URI> extraCheckedVolumes = Util.stringCollectionAsURIs(conf
.getTrimmedStringCollection(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
new file mode 100644
index 0000000000..7fdf3e60d3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -0,0 +1,980 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_PATH_DEPTH;
+import static org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_PATH_LENGTH;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+
+import org.apache.hadoop.ha.HealthCheckFailedException;
+import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.hdfs.HDFSPolicyProvider;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
+import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
+import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
+import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.NodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
+import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
+import org.apache.hadoop.io.EnumSetWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.net.Node;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.Groups;
+import org.apache.hadoop.security.RefreshUserMappingsProtocol;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.tools.GetUserMappingsProtocol;
+
+/**
+ * This class is responsible for handling all of the RPC calls to the NameNode.
+ * It is created, started, and stopped by {@link NameNode}.
+ */
+class NameNodeRpcServer implements NamenodeProtocols {
+
+ private static final Log LOG = NameNode.LOG;
+ private static final Log stateChangeLog = NameNode.stateChangeLog;
+
+ // Dependencies from other parts of NN.
+ private final FSNamesystem namesystem;
+ protected final NameNode nn;
+ private final NameNodeMetrics metrics;
+
+ private final boolean serviceAuthEnabled;
+
+ /** The RPC server that listens to requests from DataNodes */
+ private final RPC.Server serviceRpcServer;
+ private final InetSocketAddress serviceRPCAddress;
+
+ /** The RPC server that listens to requests from clients */
+ protected final RPC.Server server;
+ protected final InetSocketAddress rpcAddress;
+
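+  // When a separate service RPC address is configured, a second RPC server is
+  // created below so that DataNode requests do not compete with client
+  // requests on the main RPC port; otherwise only the client server is used.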
+ public NameNodeRpcServer(Configuration conf, NameNode nn)
+ throws IOException {
+ this.nn = nn;
+ this.namesystem = nn.getNamesystem();
+ this.metrics = NameNode.getNameNodeMetrics();
+
+ int handlerCount =
+      conf.getInt(DFS_NAMENODE_HANDLER_COUNT_KEY,
+                  DFS_NAMENODE_HANDLER_COUNT_DEFAULT);
+ InetSocketAddress socAddr = nn.getRpcServerAddress(conf);
+
+ InetSocketAddress dnSocketAddr = nn.getServiceRpcServerAddress(conf);
+ if (dnSocketAddr != null) {
+ int serviceHandlerCount =
+ conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
+ DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
+ this.serviceRpcServer = RPC.getServer(NamenodeProtocols.class, this,
+ dnSocketAddr.getHostName(), dnSocketAddr.getPort(), serviceHandlerCount,
+ false, conf, namesystem.getDelegationTokenSecretManager());
+ this.serviceRPCAddress = this.serviceRpcServer.getListenerAddress();
+ nn.setRpcServiceServerAddress(conf, serviceRPCAddress);
+ } else {
+ serviceRpcServer = null;
+ serviceRPCAddress = null;
+ }
+ this.server = RPC.getServer(NamenodeProtocols.class, this,
+ socAddr.getHostName(), socAddr.getPort(),
+ handlerCount, false, conf,
+ namesystem.getDelegationTokenSecretManager());
+
+ // set service-level authorization security policy
+ if (serviceAuthEnabled =
+ conf.getBoolean(
+ CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
+ this.server.refreshServiceAcl(conf, new HDFSPolicyProvider());
+ if (this.serviceRpcServer != null) {
+ this.serviceRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
+ }
+ }
+
+ // The rpc-server port can be ephemeral... ensure we have the correct info
+ this.rpcAddress = this.server.getListenerAddress();
+ nn.setRpcServerAddress(conf, rpcAddress);
+ }
+
+ /**
+ * Actually start serving requests.
+ */
+ void start() {
+ server.start(); //start RPC server
+ if (serviceRpcServer != null) {
+ serviceRpcServer.start();
+ }
+ }
+
+ /**
+ * Wait until the RPC server has shut down.
+ */
+ void join() throws InterruptedException {
+ this.server.join();
+ }
+
+ void stop() {
+ if(server != null) server.stop();
+ if(serviceRpcServer != null) serviceRpcServer.stop();
+ }
+
+ InetSocketAddress getServiceRpcAddress() {
+ return serviceRPCAddress;
+ }
+
+ InetSocketAddress getRpcAddress() {
+ return rpcAddress;
+ }
+
+ @Override // VersionedProtocol
+ public ProtocolSignature getProtocolSignature(String protocol,
+ long clientVersion, int clientMethodsHash) throws IOException {
+ return ProtocolSignature.getProtocolSignature(
+ this, protocol, clientVersion, clientMethodsHash);
+ }
+
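+  // NamenodeProtocols aggregates every protocol served by this class, so the
+  // version lookup below dispatches on the requested protocol's class name
+  // and rejects any protocol it does not recognize.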
+ @Override
+ public long getProtocolVersion(String protocol,
+ long clientVersion) throws IOException {
+ if (protocol.equals(ClientProtocol.class.getName())) {
+ return ClientProtocol.versionID;
+ } else if (protocol.equals(DatanodeProtocol.class.getName())){
+ return DatanodeProtocol.versionID;
+ } else if (protocol.equals(NamenodeProtocol.class.getName())){
+ return NamenodeProtocol.versionID;
+ } else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())){
+ return RefreshAuthorizationPolicyProtocol.versionID;
+ } else if (protocol.equals(RefreshUserMappingsProtocol.class.getName())){
+ return RefreshUserMappingsProtocol.versionID;
+ } else if (protocol.equals(GetUserMappingsProtocol.class.getName())){
+ return GetUserMappingsProtocol.versionID;
+ } else {
+ throw new IOException("Unknown protocol to name node: " + protocol);
+ }
+ }
+
+ /////////////////////////////////////////////////////
+ // NamenodeProtocol
+ /////////////////////////////////////////////////////
+ @Override // NamenodeProtocol
+ public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
+ throws IOException {
+ if(size <= 0) {
+ throw new IllegalArgumentException(
+ "Unexpected not positive size: "+size);
+ }
+
+ return namesystem.getBlockManager().getBlocks(datanode, size);
+ }
+
+ @Override // NamenodeProtocol
+ public ExportedBlockKeys getBlockKeys() throws IOException {
+ return namesystem.getBlockManager().getBlockKeys();
+ }
+
+ @Override // NamenodeProtocol
+ public void errorReport(NamenodeRegistration registration,
+ int errorCode,
+ String msg) throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ verifyRequest(registration);
+ LOG.info("Error report from " + registration + ": " + msg);
+ if(errorCode == FATAL)
+ namesystem.releaseBackupNode(registration);
+ }
+
+ @Override // NamenodeProtocol
+ public NamenodeRegistration register(NamenodeRegistration registration)
+ throws IOException {
+ verifyVersion(registration.getVersion());
+ NamenodeRegistration myRegistration = nn.setRegistration();
+ namesystem.registerBackupNode(registration, myRegistration);
+ return myRegistration;
+ }
+
+ @Override // NamenodeProtocol
+ public NamenodeCommand startCheckpoint(NamenodeRegistration registration)
+ throws IOException {
+ verifyRequest(registration);
+ if(!nn.isRole(NamenodeRole.NAMENODE))
+ throw new IOException("Only an ACTIVE node can invoke startCheckpoint.");
+ return namesystem.startCheckpoint(registration, nn.setRegistration());
+ }
+
+ @Override // NamenodeProtocol
+ public void endCheckpoint(NamenodeRegistration registration,
+ CheckpointSignature sig) throws IOException {
+ nn.checkOperation(OperationCategory.CHECKPOINT);
+ namesystem.endCheckpoint(registration, sig);
+ }
+
+ @Override // ClientProtocol
+  public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
+ throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ return namesystem.getDelegationToken(renewer);
+ }
+
+ @Override // ClientProtocol
+  public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
+ throws InvalidToken, IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ return namesystem.renewDelegationToken(token);
+ }
+
+ @Override // ClientProtocol
+  public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
+ throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ namesystem.cancelDelegationToken(token);
+ }
+
+ @Override // ClientProtocol
+ public LocatedBlocks getBlockLocations(String src,
+ long offset,
+ long length)
+ throws IOException {
+ nn.checkOperation(OperationCategory.READ);
+ metrics.incrGetBlockLocations();
+ return namesystem.getBlockLocations(getClientMachine(),
+ src, offset, length);
+ }
+
+ @Override // ClientProtocol
+ public FsServerDefaults getServerDefaults() throws IOException {
+ return namesystem.getServerDefaults();
+ }
+
+ @Override // ClientProtocol
+ public void create(String src,
+ FsPermission masked,
+ String clientName,
+                      EnumSetWritable<CreateFlag> flag,
+ boolean createParent,
+ short replication,
+ long blockSize) throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ String clientMachine = getClientMachine();
+ if (stateChangeLog.isDebugEnabled()) {
+ stateChangeLog.debug("*DIR* NameNode.create: file "
+ +src+" for "+clientName+" at "+clientMachine);
+ }
+ if (!checkPathLength(src)) {
+ throw new IOException("create: Pathname too long. Limit "
+ + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
+ }
+ namesystem.startFile(src,
+ new PermissionStatus(UserGroupInformation.getCurrentUser().getShortUserName(),
+ null, masked),
+ clientName, clientMachine, flag.get(), createParent, replication, blockSize);
+ metrics.incrFilesCreated();
+ metrics.incrCreateFileOps();
+ }
+
+ @Override // ClientProtocol
+ public LocatedBlock append(String src, String clientName)
+ throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ String clientMachine = getClientMachine();
+ if (stateChangeLog.isDebugEnabled()) {
+ stateChangeLog.debug("*DIR* NameNode.append: file "
+ +src+" for "+clientName+" at "+clientMachine);
+ }
+ LocatedBlock info = namesystem.appendFile(src, clientName, clientMachine);
+ metrics.incrFilesAppended();
+ return info;
+ }
+
+ @Override // ClientProtocol
+ public boolean recoverLease(String src, String clientName) throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ String clientMachine = getClientMachine();
+ return namesystem.recoverLease(src, clientName, clientMachine);
+ }
+
+ @Override // ClientProtocol
+ public boolean setReplication(String src, short replication)
+ throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ return namesystem.setReplication(src, replication);
+ }
+
+ @Override // ClientProtocol
+ public void setPermission(String src, FsPermission permissions)
+ throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ namesystem.setPermission(src, permissions);
+ }
+
+ @Override // ClientProtocol
+ public void setOwner(String src, String username, String groupname)
+ throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ namesystem.setOwner(src, username, groupname);
+ }
+
+ @Override // ClientProtocol
+ public LocatedBlock addBlock(String src,
+ String clientName,
+ ExtendedBlock previous,
+ DatanodeInfo[] excludedNodes)
+ throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ if(stateChangeLog.isDebugEnabled()) {
+ stateChangeLog.debug("*BLOCK* NameNode.addBlock: file "
+ +src+" for "+clientName);
+ }
+    HashMap<Node, Node> excludedNodesSet = null;
+    if (excludedNodes != null) {
+      excludedNodesSet = new HashMap<Node, Node>(excludedNodes.length);
+ for (Node node:excludedNodes) {
+ excludedNodesSet.put(node, node);
+ }
+ }
+ LocatedBlock locatedBlock =
+ namesystem.getAdditionalBlock(src, clientName, previous, excludedNodesSet);
+ if (locatedBlock != null)
+ metrics.incrAddBlockOps();
+ return locatedBlock;
+ }
+
+ @Override // ClientProtocol
+ public LocatedBlock getAdditionalDatanode(final String src, final ExtendedBlock blk,
+ final DatanodeInfo[] existings, final DatanodeInfo[] excludes,
+ final int numAdditionalNodes, final String clientName
+ ) throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("getAdditionalDatanode: src=" + src
+ + ", blk=" + blk
+ + ", existings=" + Arrays.asList(existings)
+ + ", excludes=" + Arrays.asList(excludes)
+ + ", numAdditionalNodes=" + numAdditionalNodes
+ + ", clientName=" + clientName);
+ }
+
+ metrics.incrGetAdditionalDatanodeOps();
+
+    HashMap<Node, Node> excludeSet = null;
+    if (excludes != null) {
+      excludeSet = new HashMap<Node, Node>(excludes.length);
+ for (Node node : excludes) {
+ excludeSet.put(node, node);
+ }
+ }
+ return namesystem.getAdditionalDatanode(src, blk,
+ existings, excludeSet, numAdditionalNodes, clientName);
+ }
+ /**
+ * The client needs to give up on the block.
+ */
+ @Override // ClientProtocol
+ public void abandonBlock(ExtendedBlock b, String src, String holder)
+ throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ if(stateChangeLog.isDebugEnabled()) {
+ stateChangeLog.debug("*BLOCK* NameNode.abandonBlock: "
+ +b+" of file "+src);
+ }
+ if (!namesystem.abandonBlock(b, src, holder)) {
+ throw new IOException("Cannot abandon block during write to " + src);
+ }
+ }
+
+ @Override // ClientProtocol
+ public boolean complete(String src, String clientName, ExtendedBlock last)
+ throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ if(stateChangeLog.isDebugEnabled()) {
+ stateChangeLog.debug("*DIR* NameNode.complete: "
+ + src + " for " + clientName);
+ }
+ return namesystem.completeFile(src, clientName, last);
+ }
+
+ /**
+ * The client has detected an error on the specified located blocks
+ * and is reporting them to the server. For now, the namenode will
+   * mark the block as corrupt. In the future we might
+   * check that the blocks are actually corrupt.
+ */
+ @Override // ClientProtocol, DatanodeProtocol
+ public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ stateChangeLog.info("*DIR* NameNode.reportBadBlocks");
+ for (int i = 0; i < blocks.length; i++) {
+ ExtendedBlock blk = blocks[i].getBlock();
+ DatanodeInfo[] nodes = blocks[i].getLocations();
+ for (int j = 0; j < nodes.length; j++) {
+ DatanodeInfo dn = nodes[j];
+ namesystem.getBlockManager().findAndMarkBlockAsCorrupt(blk, dn);
+ }
+ }
+ }
+
+ @Override // ClientProtocol
+ public LocatedBlock updateBlockForPipeline(ExtendedBlock block, String clientName)
+ throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ return namesystem.updateBlockForPipeline(block, clientName);
+ }
+
+
+ @Override // ClientProtocol
+ public void updatePipeline(String clientName, ExtendedBlock oldBlock,
+ ExtendedBlock newBlock, DatanodeID[] newNodes)
+ throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ namesystem.updatePipeline(clientName, oldBlock, newBlock, newNodes);
+ }
+
+ @Override // DatanodeProtocol
+ public void commitBlockSynchronization(ExtendedBlock block,
+ long newgenerationstamp, long newlength,
+ boolean closeFile, boolean deleteblock, DatanodeID[] newtargets)
+ throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ namesystem.commitBlockSynchronization(block,
+ newgenerationstamp, newlength, closeFile, deleteblock, newtargets);
+ }
+
+ @Override // ClientProtocol
+ public long getPreferredBlockSize(String filename)
+ throws IOException {
+ nn.checkOperation(OperationCategory.READ);
+ return namesystem.getPreferredBlockSize(filename);
+ }
+
+ @Deprecated
+ @Override // ClientProtocol
+ public boolean rename(String src, String dst) throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ if(stateChangeLog.isDebugEnabled()) {
+ stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst);
+ }
+ if (!checkPathLength(dst)) {
+ throw new IOException("rename: Pathname too long. Limit "
+ + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
+ }
+ boolean ret = namesystem.renameTo(src, dst);
+ if (ret) {
+ metrics.incrFilesRenamed();
+ }
+ return ret;
+ }
+
+ @Override // ClientProtocol
+ public void concat(String trg, String[] src) throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ namesystem.concat(trg, src);
+ }
+
+ @Override // ClientProtocol
+ public void rename(String src, String dst, Options.Rename... options)
+ throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ if(stateChangeLog.isDebugEnabled()) {
+ stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst);
+ }
+ if (!checkPathLength(dst)) {
+ throw new IOException("rename: Pathname too long. Limit "
+ + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
+ }
+ namesystem.renameTo(src, dst, options);
+ metrics.incrFilesRenamed();
+ }
+
+ @Deprecated
+ @Override // ClientProtocol
+ public boolean delete(String src) throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ return delete(src, true);
+ }
+
+ @Override // ClientProtocol
+ public boolean delete(String src, boolean recursive) throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ if (stateChangeLog.isDebugEnabled()) {
+ stateChangeLog.debug("*DIR* Namenode.delete: src=" + src
+ + ", recursive=" + recursive);
+ }
+ boolean ret = namesystem.delete(src, recursive);
+ if (ret)
+ metrics.incrDeleteFileOps();
+ return ret;
+ }
+
+ /**
+ * Check path length does not exceed maximum. Returns true if
+ * length and depth are okay. Returns false if length is too long
+ * or depth is too great.
+ */
+ private boolean checkPathLength(String src) {
+ Path srcPath = new Path(src);
+ return (src.length() <= MAX_PATH_LENGTH &&
+ srcPath.depth() <= MAX_PATH_DEPTH);
+ }
+
+ @Override // ClientProtocol
+ public boolean mkdirs(String src, FsPermission masked, boolean createParent)
+ throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ if(stateChangeLog.isDebugEnabled()) {
+ stateChangeLog.debug("*DIR* NameNode.mkdirs: " + src);
+ }
+ if (!checkPathLength(src)) {
+ throw new IOException("mkdirs: Pathname too long. Limit "
+ + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
+ }
+ return namesystem.mkdirs(src,
+ new PermissionStatus(UserGroupInformation.getCurrentUser().getShortUserName(),
+ null, masked), createParent);
+ }
+
+ @Override // ClientProtocol
+ public void renewLease(String clientName) throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ namesystem.renewLease(clientName);
+ }
+
+ @Override // ClientProtocol
+ public DirectoryListing getListing(String src, byte[] startAfter,
+ boolean needLocation) throws IOException {
+ nn.checkOperation(OperationCategory.READ);
+ DirectoryListing files = namesystem.getListing(
+ src, startAfter, needLocation);
+ if (files != null) {
+ metrics.incrGetListingOps();
+ metrics.incrFilesInGetListingOps(files.getPartialListing().length);
+ }
+ return files;
+ }
+
+ @Override // ClientProtocol
+ public HdfsFileStatus getFileInfo(String src) throws IOException {
+ nn.checkOperation(OperationCategory.READ);
+ metrics.incrFileInfoOps();
+ return namesystem.getFileInfo(src, true);
+ }
+
+ @Override // ClientProtocol
+ public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
+ nn.checkOperation(OperationCategory.READ);
+ metrics.incrFileInfoOps();
+ return namesystem.getFileInfo(src, false);
+ }
+
+ @Override
+ public long[] getStats() {
+ return namesystem.getStats();
+ }
+
+ @Override // ClientProtocol
+ public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
+ throws IOException {
+ nn.checkOperation(OperationCategory.READ);
+ DatanodeInfo results[] = namesystem.datanodeReport(type);
+ if (results == null ) {
+ throw new IOException("Cannot find datanode report");
+ }
+ return results;
+ }
+
+ @Override // ClientProtocol
+ public boolean setSafeMode(SafeModeAction action) throws IOException {
+ // TODO:HA decide on OperationCategory for this
+ return namesystem.setSafeMode(action);
+ }
+ @Override // ClientProtocol
+ public boolean restoreFailedStorage(String arg)
+ throws AccessControlException {
+ // TODO:HA decide on OperationCategory for this
+ return namesystem.restoreFailedStorage(arg);
+ }
+
+ @Override // ClientProtocol
+ public void saveNamespace() throws IOException {
+ // TODO:HA decide on OperationCategory for this
+ namesystem.saveNamespace();
+ }
+
+ @Override // ClientProtocol
+ public void refreshNodes() throws IOException {
+ // TODO:HA decide on OperationCategory for this
+ namesystem.getBlockManager().getDatanodeManager().refreshNodes(
+ new HdfsConfiguration());
+ }
+
+ @Override // NamenodeProtocol
+ public long getTransactionID() {
+ // TODO:HA decide on OperationCategory for this
+ return namesystem.getEditLog().getSyncTxId();
+ }
+
+ @Override // NamenodeProtocol
+ public CheckpointSignature rollEditLog() throws IOException {
+ // TODO:HA decide on OperationCategory for this
+ return namesystem.rollEditLog();
+ }
+
+ @Override // NamenodeProtocol
+ public RemoteEditLogManifest getEditLogManifest(long sinceTxId)
+ throws IOException {
+ // TODO:HA decide on OperationCategory for this
+ return namesystem.getEditLog().getEditLogManifest(sinceTxId);
+ }
+
+ @Override // ClientProtocol
+ public void finalizeUpgrade() throws IOException {
+ // TODO:HA decide on OperationCategory for this
+ namesystem.finalizeUpgrade();
+ }
+
+ @Override // ClientProtocol
+ public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
+ throws IOException {
+ // TODO:HA decide on OperationCategory for this
+ return namesystem.distributedUpgradeProgress(action);
+ }
+
+ @Override // ClientProtocol
+ public void metaSave(String filename) throws IOException {
+ // TODO:HA decide on OperationCategory for this
+ namesystem.metaSave(filename);
+ }
+ @Override // ClientProtocol
+ public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
+ throws IOException {
+ nn.checkOperation(OperationCategory.READ);
+    Collection<FSNamesystem.CorruptFileBlockInfo> fbs =
+ namesystem.listCorruptFileBlocks(path, cookie);
+
+ String[] files = new String[fbs.size()];
+ String lastCookie = "";
+ int i = 0;
+ for(FSNamesystem.CorruptFileBlockInfo fb: fbs) {
+ files[i++] = fb.path;
+ lastCookie = fb.block.getBlockName();
+ }
+ return new CorruptFileBlocks(files, lastCookie);
+ }
+
+ /**
+ * Tell all datanodes to use a new, non-persistent bandwidth value for
+ * dfs.datanode.balance.bandwidthPerSec.
+   * @param bandwidth Balancer bandwidth in bytes per second for all datanodes.
+ * @throws IOException
+ */
+ @Override // ClientProtocol
+ public void setBalancerBandwidth(long bandwidth) throws IOException {
+ // TODO:HA decide on OperationCategory for this
+ namesystem.getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
+ }
+
+ @Override // ClientProtocol
+ public ContentSummary getContentSummary(String path) throws IOException {
+ nn.checkOperation(OperationCategory.READ);
+ return namesystem.getContentSummary(path);
+ }
+
+ @Override // ClientProtocol
+ public void setQuota(String path, long namespaceQuota, long diskspaceQuota)
+ throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ namesystem.setQuota(path, namespaceQuota, diskspaceQuota);
+ }
+
+ @Override // ClientProtocol
+ public void fsync(String src, String clientName) throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ namesystem.fsync(src, clientName);
+ }
+
+ @Override // ClientProtocol
+ public void setTimes(String src, long mtime, long atime)
+ throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ namesystem.setTimes(src, mtime, atime);
+ }
+
+ @Override // ClientProtocol
+ public void createSymlink(String target, String link, FsPermission dirPerms,
+ boolean createParent) throws IOException {
+ nn.checkOperation(OperationCategory.WRITE);
+ metrics.incrCreateSymlinkOps();
+ /* We enforce the MAX_PATH_LENGTH limit even though a symlink target
+ * URI may refer to a non-HDFS file system.
+ */
+ if (!checkPathLength(link)) {
+ throw new IOException("Symlink path exceeds " + MAX_PATH_LENGTH +
+ " character limit");
+
+ }
+ if ("".equals(target)) {
+ throw new IOException("Invalid symlink target");
+ }
+ final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ namesystem.createSymlink(target, link,
+ new PermissionStatus(ugi.getShortUserName(), null, dirPerms), createParent);
+ }
+
+ @Override // ClientProtocol
+ public String getLinkTarget(String path) throws IOException {
+ nn.checkOperation(OperationCategory.READ);
+ metrics.incrGetLinkTargetOps();
+ /* Resolves the first symlink in the given path, returning a
+ * new path consisting of the target of the symlink and any
+ * remaining path components from the original path.
+ */
+ try {
+ HdfsFileStatus stat = namesystem.getFileInfo(path, false);
+ if (stat != null) {
+ // NB: getSymlink throws IOException if !stat.isSymlink()
+ return stat.getSymlink();
+ }
+ } catch (UnresolvedPathException e) {
+ return e.getResolvedPath().toString();
+ } catch (UnresolvedLinkException e) {
+ // The NameNode should only throw an UnresolvedPathException
+ throw new AssertionError("UnresolvedLinkException thrown");
+ }
+ return null;
+ }
+
+
+ @Override // DatanodeProtocol
+ public DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg)
+ throws IOException {
+ verifyVersion(nodeReg.getVersion());
+ namesystem.registerDatanode(nodeReg);
+
+ return nodeReg;
+ }
+
+ @Override // DatanodeProtocol
+ public DatanodeCommand[] sendHeartbeat(DatanodeRegistration nodeReg,
+ long capacity, long dfsUsed, long remaining, long blockPoolUsed,
+ int xmitsInProgress, int xceiverCount, int failedVolumes)
+ throws IOException {
+ verifyRequest(nodeReg);
+ return namesystem.handleHeartbeat(nodeReg, capacity, dfsUsed, remaining,
+ blockPoolUsed, xceiverCount, xmitsInProgress, failedVolumes);
+ }
+
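+  // A block report arrives as a packed long[]; it is wrapped in
+  // BlockListAsLongs before being handed to the BlockManager, and a Finalize
+  // command is returned if the NameNode's upgrade is finalized.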
+ @Override // DatanodeProtocol
+ public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
+ String poolId, long[] blocks) throws IOException {
+ verifyRequest(nodeReg);
+ BlockListAsLongs blist = new BlockListAsLongs(blocks);
+ if(stateChangeLog.isDebugEnabled()) {
+ stateChangeLog.debug("*BLOCK* NameNode.blockReport: "
+ + "from " + nodeReg.getName() + " " + blist.getNumberOfBlocks()
+ + " blocks");
+ }
+
+ namesystem.getBlockManager().processReport(nodeReg, poolId, blist);
+ if (nn.getFSImage().isUpgradeFinalized())
+ return new DatanodeCommand.Finalize(poolId);
+ return null;
+ }
+
+ @Override // DatanodeProtocol
+ public void blockReceivedAndDeleted(DatanodeRegistration nodeReg, String poolId,
+ ReceivedDeletedBlockInfo[] receivedAndDeletedBlocks) throws IOException {
+ verifyRequest(nodeReg);
+ if(stateChangeLog.isDebugEnabled()) {
+ stateChangeLog.debug("*BLOCK* NameNode.blockReceivedAndDeleted: "
+ +"from "+nodeReg.getName()+" "+receivedAndDeletedBlocks.length
+ +" blocks.");
+ }
+ namesystem.getBlockManager().blockReceivedAndDeleted(
+ nodeReg, poolId, receivedAndDeletedBlocks);
+ }
+
+ @Override // DatanodeProtocol
+ public void errorReport(DatanodeRegistration nodeReg,
+ int errorCode, String msg) throws IOException {
+ String dnName = (nodeReg == null ? "unknown DataNode" : nodeReg.getName());
+
+ if (errorCode == DatanodeProtocol.NOTIFY) {
+ LOG.info("Error report from " + dnName + ": " + msg);
+ return;
+ }
+ verifyRequest(nodeReg);
+
+ if (errorCode == DatanodeProtocol.DISK_ERROR) {
+ LOG.warn("Disk error on " + dnName + ": " + msg);
+ } else if (errorCode == DatanodeProtocol.FATAL_DISK_ERROR) {
+ LOG.warn("Fatal disk error on " + dnName + ": " + msg);
+ namesystem.getBlockManager().getDatanodeManager().removeDatanode(nodeReg);
+ } else {
+ LOG.info("Error report from " + dnName + ": " + msg);
+ }
+ }
+
+ @Override // DatanodeProtocol, NamenodeProtocol
+ public NamespaceInfo versionRequest() throws IOException {
+ return namesystem.getNamespaceInfo();
+ }
+
+ @Override // DatanodeProtocol
+ public UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOException {
+ return namesystem.processDistributedUpgradeCommand(comm);
+ }
+
+ /**
+ * Verify request.
+ *
+   * Verifies that the datanode's version and registration ID are correct
+   * and that the datanode does not need to be shut down.
+ *
+ * @param nodeReg data node registration
+ * @throws IOException
+ */
+ void verifyRequest(NodeRegistration nodeReg) throws IOException {
+ verifyVersion(nodeReg.getVersion());
+ if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID())) {
+ LOG.warn("Invalid registrationID - expected: "
+ + namesystem.getRegistrationID() + " received: "
+ + nodeReg.getRegistrationID());
+ throw new UnregisteredNodeException(nodeReg);
+ }
+ }
+
+ @Override // RefreshAuthorizationPolicyProtocol
+ public void refreshServiceAcl() throws IOException {
+ if (!serviceAuthEnabled) {
+ throw new AuthorizationException("Service Level Authorization not enabled!");
+ }
+
+ this.server.refreshServiceAcl(new Configuration(), new HDFSPolicyProvider());
+ if (this.serviceRpcServer != null) {
+ this.serviceRpcServer.refreshServiceAcl(new Configuration(), new HDFSPolicyProvider());
+ }
+ }
+
+ @Override // RefreshAuthorizationPolicyProtocol
+ public void refreshUserToGroupsMappings() throws IOException {
+ LOG.info("Refreshing all user-to-groups mappings. Requested by user: " +
+ UserGroupInformation.getCurrentUser().getShortUserName());
+ Groups.getUserToGroupsMappingService().refresh();
+ }
+
+ @Override // RefreshAuthorizationPolicyProtocol
+ public void refreshSuperUserGroupsConfiguration() {
+ LOG.info("Refreshing SuperUser proxy group mapping list ");
+
+ ProxyUsers.refreshSuperUserGroupsConfiguration();
+ }
+
+ @Override // GetUserMappingsProtocol
+ public String[] getGroupsForUser(String user) throws IOException {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Getting groups for user " + user);
+ }
+ return UserGroupInformation.createRemoteUser(user).getGroupNames();
+ }
+
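+  // The HAServiceProtocol methods below delegate to the NameNode itself and
+  // are synchronized so that health checks and HA state transitions are
+  // serialized.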
+ @Override // HAServiceProtocol
+ public synchronized void monitorHealth() throws HealthCheckFailedException {
+ nn.monitorHealth();
+ }
+
+ @Override // HAServiceProtocol
+ public synchronized void transitionToActive() throws ServiceFailedException {
+ nn.transitionToActive();
+ }
+
+ @Override // HAServiceProtocol
+ public synchronized void transitionToStandby() throws ServiceFailedException {
+ nn.transitionToStandby();
+ }
+
+ /**
+   * Verify that a registering node's layout version matches this NameNode's.
+   *
+   * @param version layout version reported by the node
+   * @throws IOException if the version does not match {@link HdfsConstants#LAYOUT_VERSION}
+ */
+ void verifyVersion(int version) throws IOException {
+ if (version != HdfsConstants.LAYOUT_VERSION)
+ throw new IncorrectVersionException(version, "data node");
+ }
+
+ private static String getClientMachine() {
+ String clientMachine = Server.getRemoteAddress();
+ if (clientMachine == null) {
+ clientMachine = "";
+ }
+ return clientMachine;
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 26376d476f..358d778eaf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -48,7 +48,7 @@
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NodeBase;
@@ -173,7 +173,7 @@ public void fsck() {
out.println(msg);
namenode.getNamesystem().logFsckEvent(path, remoteAddress);
- final HdfsFileStatus file = namenode.getFileInfo(path);
+ final HdfsFileStatus file = namenode.getRpcServer().getFileInfo(path);
if (file != null) {
if (showCorruptFileBlocks) {
@@ -250,7 +250,8 @@ private void check(String parent, HdfsFileStatus file, Result res) throws IOExce
res.totalDirs++;
do {
assert lastReturnedName != null;
- thisListing = namenode.getListing(path, lastReturnedName, false);
+ thisListing = namenode.getRpcServer().getListing(
+ path, lastReturnedName, false);
if (thisListing == null) {
return;
}
@@ -385,7 +386,7 @@ private void check(String parent, HdfsFileStatus file, Result res) throws IOExce
break;
case FIXING_DELETE:
if (!isOpen)
- namenode.delete(path, true);
+ namenode.getRpcServer().delete(path, true);
}
}
if (showFiles) {
@@ -414,7 +415,8 @@ private void lostFoundMove(String parent, HdfsFileStatus file, LocatedBlocks blo
String target = lostFound + fullName;
String errmsg = "Failed to move " + fullName + " to /lost+found";
try {
- if (!namenode.mkdirs(target, file.getPermission(), true)) {
+ if (!namenode.getRpcServer().mkdirs(
+ target, file.getPermission(), true)) {
LOG.warn(errmsg);
return;
}
@@ -502,8 +504,8 @@ private void copyBlock(DFSClient dfs, LocatedBlock lblock,
}
try {
s = new Socket();
- s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
- s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
+ s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
+ s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
String file = BlockReaderFactory.getFileName(targetAddr, block.getBlockPoolId(),
block.getBlockId());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
index f50e1f8b9f..3d2fd8b0be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
@@ -40,7 +40,7 @@
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -49,6 +49,7 @@
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.UserGroupInformation;
@@ -354,7 +355,7 @@ void generateHealthReport(JspWriter out, NameNode nn,
}
}
- static String getDelegationToken(final NameNode nn,
+ static String getDelegationToken(final NamenodeProtocols nn,
HttpServletRequest request, Configuration conf,
final UserGroupInformation ugi) throws IOException, InterruptedException {
Token<DelegationTokenIdentifier> token = ugi
@@ -381,7 +382,8 @@ static void redirectToRandomDataNode(ServletContext context,
.getAttribute(JspHelper.CURRENT_CONF);
final DatanodeID datanode = getRandomDatanode(nn);
UserGroupInformation ugi = JspHelper.getUGI(context, request, conf);
- String tokenString = getDelegationToken(nn, request, conf, ugi);
+ String tokenString = getDelegationToken(
+ nn.getRpcServer(), request, conf, ugi);
// if the user is defined, get a delegation token and stringify it
final String redirectLocation;
final String nodeToRedirect;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java
index 5e2041cd38..ddd0acbbfb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java
@@ -70,7 +70,7 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res
try {
long result = ugi.doAs(new PrivilegedExceptionAction<Long>() {
public Long run() throws Exception {
- return nn.renewDelegationToken(token);
+ return nn.getRpcServer().renewDelegationToken(token);
}
});
PrintStream os = new PrintStream(resp.getOutputStream());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index f126f17eeb..9c5ef6f2c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -45,8 +45,8 @@
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -122,6 +122,8 @@ public class SecondaryNameNode implements Runnable {
/** checkpoint once every this many transactions, regardless of time */
private long checkpointTxnCount;
+ private FSNamesystem namesystem;
+
/** {@inheritDoc} */
public String toString() {
@@ -220,6 +222,8 @@ private void initialize(final Configuration conf,
"/tmp/hadoop/dfs/namesecondary");
checkpointImage = new CheckpointStorage(conf, checkpointDirs, checkpointEditsDirs);
checkpointImage.recoverCreate(commandLineOpts.shouldFormat());
+
+ namesystem = new FSNamesystem(conf, checkpointImage);
// Initialize other scheduling parameters from the configuration
checkpointCheckPeriod = conf.getLong(
@@ -456,7 +460,7 @@ InetSocketAddress getNameNodeAddress() {
*/
private String getInfoServer() throws IOException {
URI fsName = FileSystem.getDefaultUri(conf);
- if (!FSConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) {
+ if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) {
throw new IOException("This is not a DFS");
}
@@ -520,7 +524,7 @@ boolean doCheckpoint() throws IOException {
boolean loadImage = downloadCheckpointFiles(
fsName, checkpointImage, sig, manifest); // Fetch fsimage and edits
- doMerge(sig, manifest, loadImage, checkpointImage);
+ doMerge(sig, manifest, loadImage, checkpointImage, namesystem);
//
// Upload the new image into the NameNode. Then tell the Namenode
@@ -750,8 +754,7 @@ static class CheckpointStorage extends FSImage {
CheckpointStorage(Configuration conf,
Collection<URI> imageDirs,
Collection<URI> editsDirs) throws IOException {
- super(conf, (FSNamesystem)null, imageDirs, editsDirs);
- setFSNamesystem(new FSNamesystem(this, conf));
+ super(conf, imageDirs, editsDirs);
// the 2NN never writes edits -- it only downloads them. So
// we shouldn't have any editLog instance. Setting to null
@@ -793,7 +796,7 @@ void recoverCreate(boolean format) throws IOException {
StorageState curState;
try {
- curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR, storage);
+ curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage);
// sd is locked but not opened
switch(curState) {
case NON_EXISTENT:
@@ -837,7 +840,8 @@ void ensureCurrentDirExists() throws IOException {
static void doMerge(
CheckpointSignature sig, RemoteEditLogManifest manifest,
- boolean loadImage, FSImage dstImage) throws IOException {
+ boolean loadImage, FSImage dstImage, FSNamesystem dstNamesystem)
+ throws IOException {
NNStorage dstStorage = dstImage.getStorage();
dstStorage.setStorageInfo(sig);
@@ -848,11 +852,11 @@ static void doMerge(
sig.mostRecentCheckpointTxId + " even though it should have " +
"just been downloaded");
}
- dstImage.reloadFromImageFile(file);
+ dstImage.reloadFromImageFile(file, dstNamesystem);
}
- Checkpointer.rollForwardByApplyingLogs(manifest, dstImage);
- dstImage.saveFSImageInAllDirs(dstImage.getLastAppliedTxId());
+ Checkpointer.rollForwardByApplyingLogs(manifest, dstImage, dstNamesystem);
+ dstImage.saveFSImageInAllDirs(dstNamesystem, dstImage.getLastAppliedTxId());
dstStorage.writeAll();
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
index 944e998ecf..cc8dccaf1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
@@ -27,7 +27,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
@@ -124,7 +124,7 @@ static void uploadImageFromStorage(String fsName,
static void getFileServer(OutputStream outstream, File localfile,
DataTransferThrottler throttler)
throws IOException {
- byte buf[] = new byte[FSConstants.IO_FILE_BUFFER_SIZE];
+ byte buf[] = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE];
FileInputStream infile = null;
try {
infile = new FileInputStream(localfile);
@@ -139,7 +139,7 @@ static void getFileServer(OutputStream outstream, File localfile,
&& localfile.getAbsolutePath().contains("fsimage")) {
// Test sending image shorter than localfile
long len = localfile.length();
- buf = new byte[(int)Math.min(len/2, FSConstants.IO_FILE_BUFFER_SIZE)];
+ buf = new byte[(int)Math.min(len/2, HdfsConstants.IO_FILE_BUFFER_SIZE)];
// This will read at most half of the image
// and the rest of the image will be sent over the wire
infile.read(buf);
@@ -179,7 +179,7 @@ static void getFileServer(OutputStream outstream, File localfile,
static MD5Hash getFileClient(String nnHostPort,
String queryString, List<File> localPaths,
NNStorage dstStorage, boolean getChecksum) throws IOException {
- byte[] buf = new byte[FSConstants.IO_FILE_BUFFER_SIZE];
+ byte[] buf = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE];
String proto = UserGroupInformation.isSecurityEnabled() ? "https://" : "http://";
StringBuilder str = new StringBuilder(proto+nnHostPort+"/getimage?");
str.append(queryString);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java
index b4e89e3fa1..a46efae8a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java
@@ -19,9 +19,9 @@
import java.io.IOException;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
import org.apache.hadoop.hdfs.server.common.UpgradeManager;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
@@ -38,8 +38,8 @@
* and updates its status.
*/
class UpgradeManagerNamenode extends UpgradeManager {
- public HdfsConstants.NodeType getType() {
- return HdfsConstants.NodeType.NAME_NODE;
+ public HdfsServerConstants.NodeType getType() {
+ return HdfsServerConstants.NodeType.NAME_NODE;
}
private final FSNamesystem namesystem;
@@ -66,7 +66,7 @@ public synchronized boolean startUpgrade() throws IOException {
this.broadcastCommand = currentUpgrades.first().startUpgrade();
NameNode.LOG.info("\n Distributed upgrade for NameNode version "
+ getUpgradeVersion() + " to current LV "
- + FSConstants.LAYOUT_VERSION + " is started.");
+ + HdfsConstants.LAYOUT_VERSION + " is started.");
return true;
}
@@ -75,7 +75,7 @@ synchronized UpgradeCommand processUpgradeCommand(UpgradeCommand command
if(NameNode.LOG.isDebugEnabled()) {
NameNode.LOG.debug("\n Distributed upgrade for NameNode version "
+ getUpgradeVersion() + " to current LV "
- + FSConstants.LAYOUT_VERSION + " is processing upgrade command: "
+ + HdfsConstants.LAYOUT_VERSION + " is processing upgrade command: "
+ command.getAction() + " status = " + getUpgradeStatus() + "%");
}
if(currentUpgrades == null) {
@@ -96,7 +96,7 @@ synchronized UpgradeCommand processUpgradeCommand(UpgradeCommand command
curUO.completeUpgrade();
NameNode.LOG.info("\n Distributed upgrade for NameNode version "
+ curUO.getVersion() + " to current LV "
- + FSConstants.LAYOUT_VERSION + " is complete.");
+ + HdfsConstants.LAYOUT_VERSION + " is complete.");
// proceed with the next one
currentUpgrades.remove(curUO);
if(currentUpgrades.isEmpty()) { // all upgrades are done
@@ -110,7 +110,7 @@ synchronized UpgradeCommand processUpgradeCommand(UpgradeCommand command
public synchronized void completeUpgrade() throws IOException {
// set and write new upgrade state into disk
- setUpgradeState(false, FSConstants.LAYOUT_VERSION);
+ setUpgradeState(false, HdfsConstants.LAYOUT_VERSION);
namesystem.getFSImage().getStorage().writeAll();
currentUpgrades = null;
broadcastCommand = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java
index 0872eb22c0..5a75554544 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java
@@ -20,7 +20,7 @@
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.UpgradeObject;
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
@@ -44,8 +44,8 @@ public abstract class UpgradeObjectNamenode extends UpgradeObject {
public abstract UpgradeCommand processUpgradeCommand(UpgradeCommand command
) throws IOException;
- public HdfsConstants.NodeType getType() {
- return HdfsConstants.NodeType.NAME_NODE;
+ public HdfsServerConstants.NodeType getType() {
+ return HdfsServerConstants.NodeType.NAME_NODE;
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
index 2ee1866617..a75701ef86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs.server.namenode.metrics;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java
index 80426605a0..aa98ab19b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java
@@ -30,7 +30,7 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
/**
* Information sent by a subordinate name-node to the active name-node
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
index e847cfc371..cc33a04d1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
@@ -24,7 +24,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
@@ -53,7 +53,7 @@ public NamespaceInfo() {
public NamespaceInfo(int nsID, String clusterID, String bpID,
long cT, int duVersion) {
- super(FSConstants.LAYOUT_VERSION, nsID, clusterID, cT);
+ super(HdfsConstants.LAYOUT_VERSION, nsID, clusterID, cT);
blockPoolID = bpID;
buildVersion = Storage.getBuildVersion();
this.distributedUpgradeVersion = duVersion;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java
index 5b8ac59f37..c82494d5ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java
@@ -22,15 +22,15 @@
import java.io.IOException;
import java.util.Comparator;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.io.Writable;
import com.google.common.base.Function;
import com.google.common.collect.ComparisonChain;
public class RemoteEditLog implements Writable, Comparable<RemoteEditLog> {
- private long startTxId = FSConstants.INVALID_TXID;
- private long endTxId = FSConstants.INVALID_TXID;
+ private long startTxId = HdfsConstants.INVALID_TXID;
+ private long endTxId = HdfsConstants.INVALID_TXID;
public RemoteEditLog() {
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java
index cdf1d791d2..bf9b68b1b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java
@@ -25,7 +25,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 91cf9eec58..b4f4e7c4d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -40,9 +40,9 @@
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.ipc.RPC;
@@ -115,7 +115,7 @@ public String getCommandName() {
@Override
public void run(Path path) throws IOException {
- dfs.setQuota(path, FSConstants.QUOTA_RESET, FSConstants.QUOTA_DONT_SET);
+ dfs.setQuota(path, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
}
}
@@ -161,7 +161,7 @@ public String getCommandName() {
@Override
public void run(Path path) throws IOException {
- dfs.setQuota(path, quota, FSConstants.QUOTA_DONT_SET);
+ dfs.setQuota(path, quota, HdfsConstants.QUOTA_DONT_SET);
}
}
@@ -200,7 +200,7 @@ public String getCommandName() {
@Override
public void run(Path path) throws IOException {
- dfs.setQuota(path, FSConstants.QUOTA_DONT_SET, FSConstants.QUOTA_RESET);
+ dfs.setQuota(path, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_RESET);
}
}
@@ -250,7 +250,7 @@ public String getCommandName() {
@Override
public void run(Path path) throws IOException {
- dfs.setQuota(path, FSConstants.QUOTA_DONT_SET, quota);
+ dfs.setQuota(path, HdfsConstants.QUOTA_DONT_SET, quota);
}
}
@@ -288,7 +288,7 @@ public void report() throws IOException {
long used = ds.getUsed();
long remaining = ds.getRemaining();
long presentCapacity = used + remaining;
- boolean mode = dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
+ boolean mode = dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET);
UpgradeStatusReport status =
dfs.distributedUpgradeProgress(UpgradeAction.GET_STATUS);
@@ -361,17 +361,17 @@ public void setSafeMode(String[] argv, int idx) throws IOException {
printUsage("-safemode");
return;
}
- FSConstants.SafeModeAction action;
+ HdfsConstants.SafeModeAction action;
Boolean waitExitSafe = false;
if ("leave".equalsIgnoreCase(argv[idx])) {
- action = FSConstants.SafeModeAction.SAFEMODE_LEAVE;
+ action = HdfsConstants.SafeModeAction.SAFEMODE_LEAVE;
} else if ("enter".equalsIgnoreCase(argv[idx])) {
- action = FSConstants.SafeModeAction.SAFEMODE_ENTER;
+ action = HdfsConstants.SafeModeAction.SAFEMODE_ENTER;
} else if ("get".equalsIgnoreCase(argv[idx])) {
- action = FSConstants.SafeModeAction.SAFEMODE_GET;
+ action = HdfsConstants.SafeModeAction.SAFEMODE_GET;
} else if ("wait".equalsIgnoreCase(argv[idx])) {
- action = FSConstants.SafeModeAction.SAFEMODE_GET;
+ action = HdfsConstants.SafeModeAction.SAFEMODE_GET;
waitExitSafe = true;
} else {
printUsage("-safemode");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java
index c68cef6a1d..617b90026c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java
@@ -28,7 +28,7 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import static org.apache.hadoop.fs.FileContextTestHelper.*;
import org.apache.hadoop.ipc.RemoteException;
import static org.junit.Assert.*;
@@ -212,7 +212,7 @@ public void testSetReplication() throws IOException {
public void testCreateLinkMaxPathLink() throws IOException {
Path dir = new Path(testBaseDir1());
Path file = new Path(testBaseDir1(), "file");
- final int maxPathLen = FSConstants.MAX_PATH_LENGTH;
+ final int maxPathLen = HdfsConstants.MAX_PATH_LENGTH;
final int dirLen = dir.toString().length() + 1;
int len = maxPathLen - dirLen;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
index a932f881a2..a437fffadd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
@@ -28,7 +28,7 @@
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.junit.AfterClass;
@@ -108,11 +108,11 @@ public void testOldRenameWithQuota() throws Exception {
Path dst2 = getTestRootPath(fc, "test/testOldRenameWithQuota/dstdir/dst2");
createFile(src1);
createFile(src2);
- fs.setQuota(src1.getParent(), FSConstants.QUOTA_DONT_SET,
- FSConstants.QUOTA_DONT_SET);
+ fs.setQuota(src1.getParent(), HdfsConstants.QUOTA_DONT_SET,
+ HdfsConstants.QUOTA_DONT_SET);
fc.mkdir(dst1.getParent(), FileContext.DEFAULT_PERM, true);
- fs.setQuota(dst1.getParent(), 2, FSConstants.QUOTA_DONT_SET);
+ fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
/*
* Test1: src does not exceed quota and dst has no quota check and hence
* accommodates rename
@@ -130,7 +130,7 @@ public void testOldRenameWithQuota() throws Exception {
* Test3: src exceeds quota and dst has *no* quota to accommodate rename
*/
// src1 has no quota to accommodate new rename node
- fs.setQuota(src1.getParent(), 1, FSConstants.QUOTA_DONT_SET);
+ fs.setQuota(src1.getParent(), 1, HdfsConstants.QUOTA_DONT_SET);
oldRename(dst1, src1, false, true);
}
@@ -143,11 +143,11 @@ public void testRenameWithQuota() throws Exception {
Path dst2 = getTestRootPath(fc, "test/testRenameWithQuota/dstdir/dst2");
createFile(src1);
createFile(src2);
- fs.setQuota(src1.getParent(), FSConstants.QUOTA_DONT_SET,
- FSConstants.QUOTA_DONT_SET);
+ fs.setQuota(src1.getParent(), HdfsConstants.QUOTA_DONT_SET,
+ HdfsConstants.QUOTA_DONT_SET);
fc.mkdir(dst1.getParent(), FileContext.DEFAULT_PERM, true);
- fs.setQuota(dst1.getParent(), 2, FSConstants.QUOTA_DONT_SET);
+ fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
/*
* Test1: src does not exceed quota and dst has no quota check and hence
* accommodates rename
@@ -170,7 +170,7 @@ public void testRenameWithQuota() throws Exception {
* rename to a destination that does not exist
*/
// src1 has no quota to accommodate new rename node
- fs.setQuota(src1.getParent(), 1, FSConstants.QUOTA_DONT_SET);
+ fs.setQuota(src1.getParent(), 1, HdfsConstants.QUOTA_DONT_SET);
rename(dst1, src1, false, false, true, Rename.NONE);
/*
@@ -179,9 +179,9 @@ public void testRenameWithQuota() throws Exception {
* is same as quota needed by src.
*/
// src1 has no quota to accommodate new rename node
- fs.setQuota(src1.getParent(), 100, FSConstants.QUOTA_DONT_SET);
+ fs.setQuota(src1.getParent(), 100, HdfsConstants.QUOTA_DONT_SET);
createFile(src1);
- fs.setQuota(src1.getParent(), 1, FSConstants.QUOTA_DONT_SET);
+ fs.setQuota(src1.getParent(), 1, HdfsConstants.QUOTA_DONT_SET);
rename(dst1, src1, true, true, false, Rename.OVERWRITE);
}
@@ -208,7 +208,7 @@ public void testEditsLogOldRename() throws Exception {
createFile(dst1);
// Set quota so that dst1 parent cannot allow under it new files/directories
- fs.setQuota(dst1.getParent(), 2, FSConstants.QUOTA_DONT_SET);
+ fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
// Free up quota for a subsequent rename
fs.delete(dst1, true);
oldRename(src1, dst1, true, false);
@@ -237,7 +237,7 @@ public void testEditsLogRename() throws Exception {
createFile(dst1);
// Set quota so that dst1 parent cannot allow under it new files/directories
- fs.setQuota(dst1.getParent(), 2, FSConstants.QUOTA_DONT_SET);
+ fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
// Free up quota for a subsequent rename
fs.delete(dst1, true);
rename(src1, dst1, true, true, false, Rename.OVERWRITE);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
index 25585cecbb..c61e65b6c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
@@ -28,7 +28,7 @@
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
@@ -140,8 +140,8 @@ public BlockReader getBlockReader(LocatedBlock testBlock, int offset, int lenToR
DatanodeInfo[] nodes = testBlock.getLocations();
targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
sock = new Socket();
- sock.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
- sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
+ sock.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
+ sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
return BlockReaderFactory.newBlockReader(
sock, targetAddr.toString()+ ":" + block.getBlockId(), block,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 3d8b6f29f5..c7566d2c62 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -54,7 +54,7 @@
import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
@@ -63,7 +63,7 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -670,7 +670,7 @@ public static BlockOpResponseProto transferRbw(final ExtendedBlock b,
final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
NetUtils.getOutputStream(s, writeTimeout),
- FSConstants.SMALL_BUFFER_SIZE));
+ HdfsConstants.SMALL_BUFFER_SIZE));
final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));
// send the request
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java
index 8e19f45641..f82986f331 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java
@@ -27,7 +27,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.namenode.CreateEditsLog;
import org.apache.hadoop.net.DNS;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 89627b71b9..e51401cfc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -47,8 +47,8 @@
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
@@ -1025,6 +1025,14 @@ public NameNode getNameNode() {
return getNameNode(0);
}
+ /**
+ * Get an instance of the NameNode's RPC handler.
+ */
+ public NamenodeProtocols getNameNodeRpc() {
+ checkSingleNameNode();
+ return getNameNode(0).getRpcServer();
+ }
+
/**
* Gets the NameNode for the index. May be null.
*/
@@ -1361,7 +1369,15 @@ public boolean isNameNodeUp(int nnIndex) {
if (nameNode == null) {
return false;
}
- long[] sizes = nameNode.getStats();
+ long[] sizes;
+ try {
+ sizes = nameNode.getRpcServer().getStats();
+ } catch (IOException ioe) {
+ // This method above should never throw.
+ // It only throws IOE since it is exposed via RPC
+ throw new AssertionError("Unexpected IOE thrown: "
+ + StringUtils.stringifyException(ioe));
+ }
boolean isUp = false;
synchronized (this) {
isUp = ((!nameNode.isInSafeMode() || !waitSafeMode) && sizes[0] != 0);
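
The new getNameNodeRpc() accessor gives tests the NameNode's RPC handler (NamenodeProtocols) instead of the NameNode object itself, which is the pattern the remaining test hunks switch to. A hedged sketch of that usage is shown below; the builder-based cluster setup is assumed for illustration only, while getNameNodeRpc() and getStats() are the calls introduced and exercised in this patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

    class NameNodeRpcSketch {
      static void example() throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          cluster.waitActive();
          // RPC-facing view of the (single) NameNode
          NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
          long[] stats = nnRpc.getStats(); // same call isNameNodeUp() now relies on
        } finally {
          cluster.shutdown();
        }
      }
    }
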
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
index d06d9766c3..1613e82ca2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
@@ -23,7 +23,7 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -85,7 +85,7 @@ public void testAbandonBlock() throws IOException {
public void testQuotaUpdatedWhenBlockAbandoned() throws IOException {
DistributedFileSystem dfs = (DistributedFileSystem)fs;
// Setting diskspace quota to 3MB
- dfs.setQuota(new Path("/"), FSConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);
+ dfs.setQuota(new Path("/"), HdfsConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);
// Start writing a file with 2 replicas to ensure each datanode has one.
// Block Size is 1MB.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
index a65e6a233f..e7988f99bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
@@ -26,6 +26,7 @@
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.io.IOUtils;
@@ -45,7 +46,7 @@ public class TestClientProtocolForPipelineRecovery {
try {
cluster.waitActive();
FileSystem fileSys = cluster.getFileSystem();
- NameNode namenode = cluster.getNameNode();
+ NamenodeProtocols namenode = cluster.getNameNodeRpc();
/* Test writing to finalized replicas */
Path file = new Path("dataprotocol.dat");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
index 1d43ea7e6b..1407fd46a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
@@ -32,7 +32,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 05fa648653..9cc1b2999c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -25,7 +25,12 @@
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
+import java.net.SocketTimeoutException;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.LongWritable;
import java.io.IOException;
+import java.net.InetSocketAddress;
import java.io.InputStream;
import java.io.OutputStream;
import java.security.MessageDigest;
@@ -44,14 +49,22 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.net.NetUtils;
import org.mockito.internal.stubbing.answers.ThrowsException;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
@@ -61,9 +74,51 @@
* properly in case of errors.
*/
public class TestDFSClientRetries extends TestCase {
+ private static final String ADDRESS = "0.0.0.0";
+ final static private int PING_INTERVAL = 1000;
+ final static private int MIN_SLEEP_TIME = 1000;
public static final Log LOG =
LogFactory.getLog(TestDFSClientRetries.class.getName());
-
+ final static private Configuration conf = new HdfsConfiguration();
+
+ private static class TestServer extends Server {
+ private boolean sleep;
+ private Class<? extends Writable> responseClass;
+
+ public TestServer(int handlerCount, boolean sleep) throws IOException {
+ this(handlerCount, sleep, LongWritable.class, null);
+ }
+
+ public TestServer(int handlerCount, boolean sleep,
+ Class<? extends Writable> paramClass,
+ Class<? extends Writable> responseClass)
+ throws IOException {
+ super(ADDRESS, 0, paramClass, handlerCount, conf);
+ this.sleep = sleep;
+ this.responseClass = responseClass;
+ }
+
+ @Override
+ public Writable call(String protocol, Writable param, long receiveTime)
+ throws IOException {
+ if (sleep) {
+ // sleep a bit
+ try {
+ Thread.sleep(PING_INTERVAL + MIN_SLEEP_TIME);
+ } catch (InterruptedException e) {}
+ }
+ if (responseClass != null) {
+ try {
+ return responseClass.newInstance();
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ } else {
+ return param; // echo param as result
+ }
+ }
+ }
+
// writes 'len' bytes of data to out.
private static void writeData(OutputStream out, int len) throws IOException {
byte [] buf = new byte[4096*16];
@@ -80,8 +135,6 @@ private static void writeData(OutputStream out, int len) throws IOException {
*/
public void testWriteTimeoutAtDataNode() throws IOException,
InterruptedException {
- Configuration conf = new HdfsConfiguration();
-
final int writeTimeout = 100; //milliseconds.
// set a very short write timeout for datanode, so that tests runs fast.
conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, writeTimeout);
@@ -136,10 +189,9 @@ public void testNotYetReplicatedErrors() throws IOException
{
final String exceptionMsg = "Nope, not replicated yet...";
final int maxRetries = 1; // Allow one retry (total of two calls)
- Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY, maxRetries);
- NameNode mockNN = mock(NameNode.class);
+ NamenodeProtocols mockNN = mock(NamenodeProtocols.class);
Answer