diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index d70af6832e..1be8d04850 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -249,8 +249,8 @@ setup () { echo "======================================================================" echo "" echo "" - echo "$MVN clean compile -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/trunkJavacWarnings.txt 2>&1" - $MVN clean compile -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/trunkJavacWarnings.txt 2>&1 + echo "$MVN clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/trunkJavacWarnings.txt 2>&1" + $MVN clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/trunkJavacWarnings.txt 2>&1 if [[ $? != 0 ]] ; then echo "Trunk compilation is broken?" cleanupAndExit 1 @@ -366,14 +366,14 @@ checkJavadocWarnings () { echo "======================================================================" echo "" echo "" - echo "$MVN clean compile javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1" + echo "$MVN clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1" if [ -d hadoop-project ]; then (cd hadoop-project; $MVN install) fi if [ -d hadoop-common-project/hadoop-annotations ]; then (cd hadoop-common-project/hadoop-annotations; $MVN install) fi - $MVN clean compile javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1 + $MVN clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1 javadocWarnings=`$GREP '\[WARNING\]' $PATCH_DIR/patchJavadocWarnings.txt | $AWK '/Javadoc Warnings/,EOF' | $GREP warning | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'` echo "" echo "" @@ -404,8 +404,8 @@ checkJavacWarnings () { echo "======================================================================" echo "" echo "" - echo "$MVN clean compile -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1" - $MVN clean compile -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1 + echo "$MVN clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1" + $MVN clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1 if [[ $? != 0 ]] ; then JIRA_COMMENT="$JIRA_COMMENT @@ -488,8 +488,8 @@ checkStyle () { echo "THIS IS NOT IMPLEMENTED YET" echo "" echo "" - echo "$MVN compile checkstyle:checkstyle -D${PROJECT_NAME}PatchProcess" - $MVN compile checkstyle:checkstyle -D${PROJECT_NAME}PatchProcess + echo "$MVN test checkstyle:checkstyle -DskipTests -D${PROJECT_NAME}PatchProcess" + $MVN test checkstyle:checkstyle -DskipTests -D${PROJECT_NAME}PatchProcess JIRA_COMMENT_FOOTER="Checkstyle results: $BUILD_URL/artifact/trunk/build/test/checkstyle-errors.html $JIRA_COMMENT_FOOTER" @@ -520,8 +520,8 @@ checkFindbugsWarnings () { echo "======================================================================" echo "" echo "" - echo "$MVN clean compile findbugs:findbugs -D${PROJECT_NAME}PatchProcess" - $MVN clean compile findbugs:findbugs -D${PROJECT_NAME}PatchProcess < /dev/null + echo "$MVN clean test findbugs:findbugs -DskipTests -D${PROJECT_NAME}PatchProcess" + $MVN clean test findbugs:findbugs -DskipTests -D${PROJECT_NAME}PatchProcess < /dev/null if [ $? 
!= 0 ] ; then JIRA_COMMENT="$JIRA_COMMENT diff --git a/hadoop-common-project/hadoop-auth/src/examples/pom.xml b/hadoop-common-project/hadoop-auth-examples/pom.xml similarity index 73% rename from hadoop-common-project/hadoop-auth/src/examples/pom.xml rename to hadoop-common-project/hadoop-auth-examples/pom.xml index 1ce2b81c3b..67f113ba2c 100644 --- a/hadoop-common-project/hadoop-auth/src/examples/pom.xml +++ b/hadoop-common-project/hadoop-auth-examples/pom.xml @@ -18,15 +18,15 @@ org.apache.hadoop hadoop-project 0.24.0-SNAPSHOT - ../hadoop-project + ../../hadoop-project org.apache.hadoop - hadoop-alfredo-examples + hadoop-auth-examples 0.24.0-SNAPSHOT war - Hadoop Alfredo Examples - Hadoop Alfredo - Java HTTP SPNEGO Examples + Apache Hadoop Auth Examples + Apache Hadoop Auth Examples - Java HTTP SPNEGO @@ -36,7 +36,7 @@ org.apache.hadoop - hadoop-alfredo + hadoop-auth compile @@ -53,6 +53,18 @@ + + maven-war-plugin + + hadoop-auth-examples + + + + maven-deploy-plugin + + true + + org.codehaus.mojo exec-maven-plugin @@ -64,7 +76,7 @@ - org.apache.hadoop.alfredo.examples.WhoClient + org.apache.hadoop.security.authentication.examples.WhoClient ${url} diff --git a/hadoop-common-project/hadoop-auth/src/examples/src/main/java/org/apache/hadoop/alfredo/examples/RequestLoggerFilter.java b/hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/RequestLoggerFilter.java similarity index 98% rename from hadoop-common-project/hadoop-auth/src/examples/src/main/java/org/apache/hadoop/alfredo/examples/RequestLoggerFilter.java rename to hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/RequestLoggerFilter.java index 015862d468..a9721c9eba 100644 --- a/hadoop-common-project/hadoop-auth/src/examples/src/main/java/org/apache/hadoop/alfredo/examples/RequestLoggerFilter.java +++ b/hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/RequestLoggerFilter.java @@ -11,7 +11,7 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. */ -package org.apache.hadoop.alfredo.examples; +package org.apache.hadoop.security.authentication.examples; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-auth/src/examples/src/main/java/org/apache/hadoop/alfredo/examples/WhoClient.java b/hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoClient.java similarity index 93% rename from hadoop-common-project/hadoop-auth/src/examples/src/main/java/org/apache/hadoop/alfredo/examples/WhoClient.java rename to hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoClient.java index fcbd7a23b4..2299ae1fd8 100644 --- a/hadoop-common-project/hadoop-auth/src/examples/src/main/java/org/apache/hadoop/alfredo/examples/WhoClient.java +++ b/hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoClient.java @@ -11,9 +11,9 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. 
*/ -package org.apache.hadoop.alfredo.examples; +package org.apache.hadoop.security.authentication.examples; -import org.apache.hadoop.alfredo.client.AuthenticatedURL; +import org.apache.hadoop.security.authentication.client.AuthenticatedURL; import java.io.BufferedReader; import java.io.InputStreamReader; diff --git a/hadoop-common-project/hadoop-auth/src/examples/src/main/java/org/apache/hadoop/alfredo/examples/WhoServlet.java b/hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoServlet.java similarity index 96% rename from hadoop-common-project/hadoop-auth/src/examples/src/main/java/org/apache/hadoop/alfredo/examples/WhoServlet.java rename to hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoServlet.java index 2d703daf0f..aae38133bb 100644 --- a/hadoop-common-project/hadoop-auth/src/examples/src/main/java/org/apache/hadoop/alfredo/examples/WhoServlet.java +++ b/hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoServlet.java @@ -11,7 +11,7 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. */ -package org.apache.hadoop.alfredo.examples; +package org.apache.hadoop.security.authentication.examples; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; diff --git a/hadoop-common-project/hadoop-auth/src/examples/src/main/resources/log4j.properties b/hadoop-common-project/hadoop-auth-examples/src/main/resources/log4j.properties similarity index 92% rename from hadoop-common-project/hadoop-auth/src/examples/src/main/resources/log4j.properties rename to hadoop-common-project/hadoop-auth-examples/src/main/resources/log4j.properties index 979be5cadc..5fa402026c 100644 --- a/hadoop-common-project/hadoop-auth/src/examples/src/main/resources/log4j.properties +++ b/hadoop-common-project/hadoop-auth-examples/src/main/resources/log4j.properties @@ -16,4 +16,4 @@ log4j.appender.test.Target=System.out log4j.appender.test.layout=org.apache.log4j.PatternLayout log4j.appender.test.layout.ConversionPattern=%d{ABSOLUTE} %5p %c{1}:%L - %m%n -log4j.logger.org.apache.hadoop.alfredo=DEBUG, test +log4j.logger.org.apache.hadoop.security.authentication=DEBUG, test diff --git a/hadoop-common-project/hadoop-auth/src/examples/src/main/webapp/WEB-INF/web.xml b/hadoop-common-project/hadoop-auth-examples/src/main/webapp/WEB-INF/web.xml similarity index 86% rename from hadoop-common-project/hadoop-auth/src/examples/src/main/webapp/WEB-INF/web.xml rename to hadoop-common-project/hadoop-auth-examples/src/main/webapp/WEB-INF/web.xml index 400a25a9ad..e287abdd9e 100644 --- a/hadoop-common-project/hadoop-auth/src/examples/src/main/webapp/WEB-INF/web.xml +++ b/hadoop-common-project/hadoop-auth-examples/src/main/webapp/WEB-INF/web.xml @@ -16,7 +16,7 @@ whoServlet - org.apache.hadoop.alfredo.examples.WhoServlet + org.apache.hadoop.security.authentication.examples.WhoServlet @@ -36,12 +36,12 @@ requestLoggerFilter - org.apache.hadoop.alfredo.examples.RequestLoggerFilter + org.apache.hadoop.security.authentication.examples.RequestLoggerFilter anonymousFilter - org.apache.hadoop.alfredo.server.AuthenticationFilter + org.apache.hadoop.security.authentication.server.AuthenticationFilter type simple @@ -58,7 +58,7 @@ simpleFilter - org.apache.hadoop.alfredo.server.AuthenticationFilter + 
org.apache.hadoop.security.authentication.server.AuthenticationFilter type simple @@ -75,7 +75,7 @@ kerberosFilter - org.apache.hadoop.alfredo.server.AuthenticationFilter + org.apache.hadoop.security.authentication.server.AuthenticationFilter type kerberos diff --git a/hadoop-common-project/hadoop-auth/src/examples/src/main/webapp/annonymous/index.html b/hadoop-common-project/hadoop-auth-examples/src/main/webapp/annonymous/index.html similarity index 88% rename from hadoop-common-project/hadoop-auth/src/examples/src/main/webapp/annonymous/index.html rename to hadoop-common-project/hadoop-auth-examples/src/main/webapp/annonymous/index.html index 54e040d0cc..73294e1d17 100644 --- a/hadoop-common-project/hadoop-auth/src/examples/src/main/webapp/annonymous/index.html +++ b/hadoop-common-project/hadoop-auth-examples/src/main/webapp/annonymous/index.html @@ -13,6 +13,6 @@ --> -

Hello Hadoop Alfredo Pseudo/Simple Authentication with anonymous users!
+Hello Hadoop Auth Pseudo/Simple Authentication with anonymous users!
diff --git a/hadoop-common-project/hadoop-auth/src/examples/src/main/webapp/index.html b/hadoop-common-project/hadoop-auth-examples/src/main/webapp/index.html similarity index 93% rename from hadoop-common-project/hadoop-auth/src/examples/src/main/webapp/index.html rename to hadoop-common-project/hadoop-auth-examples/src/main/webapp/index.html index f6b5737fd3..7c09261d4c 100644 --- a/hadoop-common-project/hadoop-auth/src/examples/src/main/webapp/index.html +++ b/hadoop-common-project/hadoop-auth-examples/src/main/webapp/index.html @@ -13,6 +13,6 @@ --> -

Hello Hadoop Alfredo Examples
+Hello Hadoop Auth Examples!
diff --git a/hadoop-common-project/hadoop-auth/src/examples/src/main/webapp/simple/index.html b/hadoop-common-project/hadoop-auth-examples/src/main/webapp/kerberos/index.html similarity index 90% rename from hadoop-common-project/hadoop-auth/src/examples/src/main/webapp/simple/index.html rename to hadoop-common-project/hadoop-auth-examples/src/main/webapp/kerberos/index.html index bb0aef5cc7..fec01f6921 100644 --- a/hadoop-common-project/hadoop-auth/src/examples/src/main/webapp/simple/index.html +++ b/hadoop-common-project/hadoop-auth-examples/src/main/webapp/kerberos/index.html @@ -13,6 +13,6 @@ --> -

Hello Hadoop Alfredo Pseudo/Simple Authentication!
+Hello Hadoop Auth Kerberos SPNEGO Authentication!
diff --git a/hadoop-common-project/hadoop-auth/src/examples/src/main/webapp/kerberos/index.html b/hadoop-common-project/hadoop-auth-examples/src/main/webapp/simple/index.html similarity index 90% rename from hadoop-common-project/hadoop-auth/src/examples/src/main/webapp/kerberos/index.html rename to hadoop-common-project/hadoop-auth-examples/src/main/webapp/simple/index.html index 39108400c0..7981219db3 100644 --- a/hadoop-common-project/hadoop-auth/src/examples/src/main/webapp/kerberos/index.html +++ b/hadoop-common-project/hadoop-auth-examples/src/main/webapp/simple/index.html @@ -13,6 +13,6 @@ --> -

Hello Hadoop Alfredo Kerberos SPNEGO Authentication!
+Hello Hadoop Auth Pseudo/Simple Authentication!
diff --git a/hadoop-common-project/hadoop-auth/BUILDING.txt b/hadoop-common-project/hadoop-auth/BUILDING.txt index cbeaf54767..b81b71cbb3 100644 --- a/hadoop-common-project/hadoop-auth/BUILDING.txt +++ b/hadoop-common-project/hadoop-auth/BUILDING.txt @@ -1,20 +1,20 @@ -Build instructions for Hadoop Alfredo +Build instructions for Hadoop Auth Same as for Hadoop. -For more details refer to the Alfredo documentation pages. +For more details refer to the Hadoop Auth documentation pages. ----------------------------------------------------------------------------- Caveats: -* Alfredo has profile to enable Kerberos testcases (testKerberos) +* Hadoop Auth has profile to enable Kerberos testcases (testKerberos) To run Kerberos testcases a KDC, 2 kerberos principals and a keytab file - are required (refer to the Alfredo documentation pages for details). + are required (refer to the Hadoop Auth documentation pages for details). -* Alfredo does not have a distribution profile (dist) +* Hadoop Auth does not have a distribution profile (dist) -* Alfredo does not have a native code profile (native) +* Hadoop Auth does not have a native code profile (native) ----------------------------------------------------------------------------- diff --git a/hadoop-common-project/hadoop-auth/README.txt b/hadoop-common-project/hadoop-auth/README.txt index a51f6d3586..efa95dd516 100644 --- a/hadoop-common-project/hadoop-auth/README.txt +++ b/hadoop-common-project/hadoop-auth/README.txt @@ -1,6 +1,6 @@ -Hadoop Alfredo, Java HTTP SPNEGO +Hadoop Auth, Java HTTP SPNEGO -Hadoop Alfredo is a Java library consisting of a client and a server +Hadoop Auth is a Java library consisting of a client and a server components to enable Kerberos SPNEGO authentication for HTTP. The client component is the AuthenticatedURL class. @@ -10,6 +10,6 @@ The server component is the AuthenticationFilter servlet filter class. Authentication mechanisms support is pluggable in both the client and the server components via interfaces. -In addition to Kerberos SPNEGO, Alfredo also supports Pseudo/Simple +In addition to Kerberos SPNEGO, Hadoop Auth also supports Pseudo/Simple authentication (trusting the value of the query string parameter 'user.name'). 
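The README hunk above describes the client side of the renamed library: AuthenticatedURL drives the Kerberos SPNEGO (or Pseudo/Simple) handshake over a plain HttpURLConnection. Below is a minimal client sketch against the renamed package, assuming the no-arg constructor plus openConnection(URL, Token) form of the class (the Examples page further down in this patch quotes a slightly different call); the class name is illustrative and the endpoint path matches the curl examples later in the patch.

import java.net.HttpURLConnection;
import java.net.URL;

import org.apache.hadoop.security.authentication.client.AuthenticatedURL;

public class WhoClientSketch {

  public static void main(String[] args) throws Exception {
    // Hypothetical endpoint protected by an AuthenticationFilter configured for Kerberos.
    URL url = new URL("http://localhost:8080/hadoop-auth-examples/kerberos/who");

    // The token caches the signed "hadoop.auth" cookie between requests.
    AuthenticatedURL.Token token = new AuthenticatedURL.Token();

    // The first call performs the authentication sequence and populates the token.
    HttpURLConnection conn = new AuthenticatedURL().openConnection(url, token);
    System.out.println("first call:  " + conn.getResponseCode());

    // Later calls reuse the token, so only the cookie travels with the request.
    conn = new AuthenticatedURL().openConnection(url, token);
    System.out.println("second call: " + conn.getResponseCode());
  }
}

Reusing the token across requests is the same idea as the cookiejar-based curl examples shown later in this patch: once the cookie is issued, subsequent requests skip the handshake until it expires.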
diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml index 66bdbfb6f6..9bcf629f03 100644 --- a/hadoop-common-project/hadoop-auth/pom.xml +++ b/hadoop-common-project/hadoop-auth/pom.xml @@ -21,13 +21,12 @@ ../../hadoop-project org.apache.hadoop - hadoop-alfredo + hadoop-auth 0.24.0-SNAPSHOT jar - Apache Hadoop Alfredo - Apache Hadoop Alfredo - Java HTTP SPNEGO - http://hadoop.apache.org/alfredo + Apache Hadoop Auth + Apache Hadoop Auth - Java HTTP SPNEGO yyyyMMdd diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/AuthenticatedURL.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java similarity index 97% rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/AuthenticatedURL.java rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java index 22a43b8454..5a446609c2 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/AuthenticatedURL.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java @@ -11,9 +11,9 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. */ -package org.apache.hadoop.alfredo.client; +package org.apache.hadoop.security.authentication.client; -import org.apache.hadoop.alfredo.server.AuthenticationFilter; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; import java.io.IOException; import java.net.HttpURLConnection; @@ -63,7 +63,7 @@ public class AuthenticatedURL { /** * Name of the HTTP cookie used for the authentication token between the client and the server. */ - public static final String AUTH_COOKIE = "alfredo.auth"; + public static final String AUTH_COOKIE = "hadoop.auth"; private static final String AUTH_COOKIE_EQ = AUTH_COOKIE + "="; diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/AuthenticationException.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticationException.java similarity index 95% rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/AuthenticationException.java rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticationException.java index ba91847665..13632fb1bc 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/AuthenticationException.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticationException.java @@ -11,7 +11,7 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. */ -package org.apache.hadoop.alfredo.client; +package org.apache.hadoop.security.authentication.client; /** * Exception thrown when an authentication error occurrs. 
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/Authenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/Authenticator.java similarity index 95% rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/Authenticator.java rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/Authenticator.java index 85f5d40530..7b23f20699 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/Authenticator.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/Authenticator.java @@ -11,7 +11,7 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. */ -package org.apache.hadoop.alfredo.client; +package org.apache.hadoop.security.authentication.client; import java.io.IOException; diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/KerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java similarity index 97% rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/KerberosAuthenticator.java rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java index 69a91f5081..b3dc6fe85c 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/KerberosAuthenticator.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java @@ -11,7 +11,7 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. */ -package org.apache.hadoop.alfredo.client; +package org.apache.hadoop.security.authentication.client; import com.sun.security.auth.module.Krb5LoginModule; import org.apache.commons.codec.binary.Base64; @@ -48,17 +48,17 @@ public class KerberosAuthenticator implements Authenticator { /** * HTTP header used by the SPNEGO server endpoint during an authentication sequence. */ - public static String WWW_AUTHENTICATE = "WWW-Authenticate"; + public static final String WWW_AUTHENTICATE = "WWW-Authenticate"; /** * HTTP header used by the SPNEGO client endpoint during an authentication sequence. */ - public static String AUTHORIZATION = "Authorization"; + public static final String AUTHORIZATION = "Authorization"; /** * HTTP header prefix used by the SPNEGO client/server endpoints during an authentication sequence. 
*/ - public static String NEGOTIATE = "Negotiate"; + public static final String NEGOTIATE = "Negotiate"; private static final String AUTH_HTTP_METHOD = "OPTIONS"; diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/PseudoAuthenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java similarity index 97% rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/PseudoAuthenticator.java rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java index fb7991d64f..dff7a31003 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/client/PseudoAuthenticator.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java @@ -11,7 +11,7 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. */ -package org.apache.hadoop.alfredo.client; +package org.apache.hadoop.security.authentication.client; import java.io.IOException; import java.net.HttpURLConnection; diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/AuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java similarity index 95% rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/AuthenticationFilter.java rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java index 2b39d7ee59..f7305d0282 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/AuthenticationFilter.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java @@ -11,12 +11,12 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. 
*/ -package org.apache.hadoop.alfredo.server; +package org.apache.hadoop.security.authentication.server; -import org.apache.hadoop.alfredo.client.AuthenticatedURL; -import org.apache.hadoop.alfredo.client.AuthenticationException; -import org.apache.hadoop.alfredo.util.Signer; -import org.apache.hadoop.alfredo.util.SignerException; +import org.apache.hadoop.security.authentication.client.AuthenticatedURL; +import org.apache.hadoop.security.authentication.client.AuthenticationException; +import org.apache.hadoop.security.authentication.util.Signer; +import org.apache.hadoop.security.authentication.util.SignerException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -103,6 +103,8 @@ public class AuthenticationFilter implements Filter { */ public static final String COOKIE_PATH = "cookie.path"; + private static final Random RAN = new Random(); + private Signer signer; private AuthenticationHandler authHandler; private boolean randomSecret; @@ -139,7 +141,7 @@ public void init(FilterConfig filterConfig) throws ServletException { } try { - Class klass = Thread.currentThread().getContextClassLoader().loadClass(authHandlerClassName); + Class klass = Thread.currentThread().getContextClassLoader().loadClass(authHandlerClassName); authHandler = (AuthenticationHandler) klass.newInstance(); authHandler.init(config); } catch (ClassNotFoundException ex) { @@ -151,7 +153,7 @@ public void init(FilterConfig filterConfig) throws ServletException { } String signatureSecret = config.getProperty(configPrefix + SIGNATURE_SECRET); if (signatureSecret == null) { - signatureSecret = Long.toString(new Random(System.currentTimeMillis()).nextLong()); + signatureSecret = Long.toString(RAN.nextLong()); randomSecret = true; LOG.warn("'signature.secret' configuration not set, using a random value as secret"); } @@ -237,7 +239,7 @@ public void destroy() { */ protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig) throws ServletException { Properties props = new Properties(); - Enumeration names = filterConfig.getInitParameterNames(); + Enumeration names = filterConfig.getInitParameterNames(); while (names.hasMoreElements()) { String name = (String) names.nextElement(); if (name.startsWith(configPrefix)) { @@ -381,7 +383,7 @@ public Principal getUserPrincipal() { } /** - * Creates the Alfredo authentiation HTTP cookie. + * Creates the Hadoop authentiation HTTP cookie. *

* It sets the domain and path specified in the configuration. * diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/AuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java similarity index 96% rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/AuthenticationHandler.java rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java index e79c938699..958680fcad 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/AuthenticationHandler.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java @@ -11,9 +11,9 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. */ -package org.apache.hadoop.alfredo.server; +package org.apache.hadoop.security.authentication.server; -import org.apache.hadoop.alfredo.client.AuthenticationException; +import org.apache.hadoop.security.authentication.client.AuthenticationException; import javax.servlet.ServletException; import javax.servlet.http.HttpServletRequest; diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/AuthenticationToken.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java similarity index 98% rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/AuthenticationToken.java rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java index 0ae9947a8f..fd17249ce6 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/AuthenticationToken.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java @@ -11,9 +11,9 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. 
*/ -package org.apache.hadoop.alfredo.server; +package org.apache.hadoop.security.authentication.server; -import org.apache.hadoop.alfredo.client.AuthenticationException; +import org.apache.hadoop.security.authentication.client.AuthenticationException; import java.security.Principal; import java.util.Arrays; diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/KerberosAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java similarity index 97% rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/KerberosAuthenticationHandler.java rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java index ee985d9cdd..121d96628b 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/KerberosAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java @@ -11,13 +11,13 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. */ -package org.apache.hadoop.alfredo.server; +package org.apache.hadoop.security.authentication.server; -import org.apache.hadoop.alfredo.client.AuthenticationException; -import org.apache.hadoop.alfredo.client.KerberosAuthenticator; +import org.apache.hadoop.security.authentication.client.AuthenticationException; +import org.apache.hadoop.security.authentication.client.KerberosAuthenticator; import com.sun.security.auth.module.Krb5LoginModule; import org.apache.commons.codec.binary.Base64; -import org.apache.hadoop.alfredo.util.KerberosName; +import org.apache.hadoop.security.authentication.util.KerberosName; import org.ietf.jgss.GSSContext; import org.ietf.jgss.GSSCredential; import org.ietf.jgss.GSSManager; diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/PseudoAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java similarity index 95% rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/PseudoAuthenticationHandler.java rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java index 4783c00822..f23b2d0381 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/server/PseudoAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java @@ -11,10 +11,10 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. 
*/ -package org.apache.hadoop.alfredo.server; +package org.apache.hadoop.security.authentication.server; -import org.apache.hadoop.alfredo.client.AuthenticationException; -import org.apache.hadoop.alfredo.client.PseudoAuthenticator; +import org.apache.hadoop.security.authentication.client.AuthenticationException; +import org.apache.hadoop.security.authentication.client.PseudoAuthenticator; import javax.servlet.ServletException; import javax.servlet.http.HttpServletRequest; diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/util/KerberosName.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java similarity index 99% rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/util/KerberosName.java rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java index 7d68e8cf20..6a7ae0e412 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/util/KerberosName.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java @@ -1,4 +1,4 @@ -package org.apache.hadoop.alfredo.util; +package org.apache.hadoop.security.authentication.util; /** * Licensed to the Apache Software Foundation (ASF) under one diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/util/Signer.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java similarity index 98% rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/util/Signer.java rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java index aba73cbaee..10c9a8e238 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/util/Signer.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java @@ -11,7 +11,7 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. */ -package org.apache.hadoop.alfredo.util; +package org.apache.hadoop.security.authentication.util; import org.apache.commons.codec.binary.Base64; diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/util/SignerException.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/SignerException.java similarity index 94% rename from hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/util/SignerException.java rename to hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/SignerException.java index 7bab225cf0..faf2007b0b 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/alfredo/util/SignerException.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/SignerException.java @@ -11,7 +11,7 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. */ -package org.apache.hadoop.alfredo.util; +package org.apache.hadoop.security.authentication.util; /** * Exception thrown by {@link Signer} when a string signature is invalid. 
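One behavioural detail in the AuthenticationFilter hunk above: when no signature.secret is configured, the random fallback secret is now drawn from a single shared java.util.Random instance (the RAN field) instead of a Random seeded with System.currentTimeMillis() on every init(). The sketch below contrasts the two forms; the field name is taken from the patch, while the stated motivation (avoiding identical wall-clock seeds) is my reading of the change, not something the patch itself spells out.

import java.util.Random;

public class SecretGenerationSketch {

  // After the patch: one generator shared for the life of the class
  // (field name RAN taken from the AuthenticationFilter hunk above).
  private static final Random RAN = new Random();

  // Before the patch: a fresh Random seeded from the wall clock on every init(),
  // so two filters initialized in the same millisecond could derive the same secret.
  static String oldStyleSecret() {
    return Long.toString(new Random(System.currentTimeMillis()).nextLong());
  }

  // After the patch: reuse the shared instance.
  static String newStyleSecret() {
    return Long.toString(RAN.nextLong());
  }

  public static void main(String[] args) {
    System.out.println("old style: " + oldStyleSecret());
    System.out.println("new style: " + newStyleSecret());
  }
}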
diff --git a/hadoop-common-project/hadoop-auth/src/site/apt/BuildingIt.apt.vm b/hadoop-common-project/hadoop-auth/src/site/apt/BuildingIt.apt.vm index 32d09d7c43..a2e015ae0c 100644 --- a/hadoop-common-project/hadoop-auth/src/site/apt/BuildingIt.apt.vm +++ b/hadoop-common-project/hadoop-auth/src/site/apt/BuildingIt.apt.vm @@ -11,12 +11,12 @@ ~~ limitations under the License. See accompanying LICENSE file. --- - Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Building It + Hadoop Auth, Java HTTP SPNEGO ${project.version} - Building It --- --- ${maven.build.timestamp} -Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Building It +Hadoop Auth, Java HTTP SPNEGO ${project.version} - Building It \[ {{{./index.html}Go Back}} \] @@ -50,14 +50,14 @@ $ mvn test -PtestKerberos The following Maven <<<-D>>> options can be used to change the default values: - * <<>>: default value <> + * <<>>: default value <> - * <<>>: default value <> + * <<>>: default value <> - * <<>>: default value + * <<>>: default value <> (it must start 'HTTP/') - * <<>>: default value + * <<>>: default value <<${HOME}/${USER}.keytab>> ** Generating Documentation @@ -69,7 +69,7 @@ $ mvn package -Pdocs +---+ The generated documentation is available at - <<>>. + <<>>. \[ {{{./index.html}Go Back}} \] diff --git a/hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm b/hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm index d4d18151c3..e42ee8b4c3 100644 --- a/hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm +++ b/hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm @@ -11,30 +11,30 @@ ~~ limitations under the License. See accompanying LICENSE file. --- - Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Server Side + Hadoop Auth, Java HTTP SPNEGO ${project.version} - Server Side Configuration --- --- ${maven.build.timestamp} -Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Server Side +Hadoop Auth, Java HTTP SPNEGO ${project.version} - Server Side Configuration \[ {{{./index.html}Go Back}} \] * Server Side Configuration Setup - The {{{./apidocs/org/apache/hadoop/alfredo/server/AuthenticationFilter.html} - AuthenticationFilter filter}} is Alfredo's server side component. + The {{{./apidocs/org/apache/hadoop/auth/server/AuthenticationFilter.html} + AuthenticationFilter filter}} is Hadoop Auth's server side component. This filter must be configured in front of all the web application resources that required authenticated requests. For example: - The Alfredo and dependent JAR files must be in the web application classpath - (commonly the <<>> directory). + The Hadoop Auth and dependent JAR files must be in the web application + classpath (commonly the <<>> directory). - Alfredo uses SLF4J-API for logging. Alfredo Maven POM dependencies define the - SLF4J API dependency but it does not define the dependency on a concrete + Hadoop Auth uses SLF4J-API for logging. Auth Maven POM dependencies define + the SLF4J API dependency but it does not define the dependency on a concrete logging implementation, this must be addded explicitly to the web application. 
For example, if the web applicationan uses Log4j, the SLF4J-LOG4J12 and LOG4J jar files must be part part of the web application @@ -47,7 +47,7 @@ Configuration * <<<[PREFIX.]type>>>: the authentication type keyword (<<>> or <<>>) or a - {{{./apidocs/org/apache/hadoop/alfredo/server/AuthenticationHandler.html} + {{{./apidocs/org/apache/hadoop/auth/server/AuthenticationHandler.html} Authentication handler implementation}}. * <<<[PREFIX.]signature.secret>>>: The secret to SHA-sign the generated @@ -80,7 +80,7 @@ Configuration * <<<[PREFIX.]kerberos.keytab>>>: The path to the keytab file containing the credentials for the kerberos principal. For example: - <<>>. There is no default value. + <<>>. There is no default value. <>: @@ -90,7 +90,7 @@ Configuration kerberosFilter - org.apache.hadoop.alfredo.server.AuthenticationFilter + org.apache.hadoop.security.auth.server.AuthenticationFilter type kerberos @@ -113,7 +113,7 @@ Configuration kerberos.keytab - /tmp/alfredo.keytab + /tmp/auth.keytab @@ -146,7 +146,7 @@ Configuration simpleFilter - org.apache.hadoop.alfredo.server.AuthenticationFilter + org.apache.hadoop.security.auth.server.AuthenticationFilter type simple diff --git a/hadoop-common-project/hadoop-auth/src/site/apt/Examples.apt.vm b/hadoop-common-project/hadoop-auth/src/site/apt/Examples.apt.vm index d17b7e8a98..7070862d9e 100644 --- a/hadoop-common-project/hadoop-auth/src/site/apt/Examples.apt.vm +++ b/hadoop-common-project/hadoop-auth/src/site/apt/Examples.apt.vm @@ -11,16 +11,16 @@ ~~ limitations under the License. See accompanying LICENSE file. --- - Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Examples + Hadoop Auth, Java HTTP SPNEGO ${project.version} - Examples --- --- ${maven.build.timestamp} -Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Examples +Hadoop Auth, Java HTTP SPNEGO ${project.version} - Examples \[ {{{./index.html}Go Back}} \] -* Accessing a Alfredo protected URL Using a browser +* Accessing a Hadoop Auth protected URL Using a browser <> The browser must support HTTP Kerberos SPNEGO. For example, Firefox or Internet Explorer. @@ -31,7 +31,7 @@ Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Examples the domain of the web server that is HTTP Kerberos SPNEGO protected (if using multiple domains and hostname use comma to separate them). -* Accessing a Alfredo protected URL Using <<>> +* Accessing a Hadoop Auth protected URL Using <<>> <> The <<>> version must support GSS, run <<>>. @@ -48,10 +48,10 @@ Features: GSS-Negotiate IPv6 Largefile NTLM SSL libz +---+ $ kinit Please enter the password for tucu@LOCALHOST: -$ curl --negotiate -u foo -b ~/cookiejar.txt -c ~/cookiejar.txt http://localhost:8080/alfredo-examples/kerberos/who +$ curl --negotiate -u foo -b ~/cookiejar.txt -c ~/cookiejar.txt http://localhost:8080/hadoop-auth-examples/kerberos/who Enter host password for user 'tucu': -Hello Alfredo! +Hello Hadoop Auth Examples! +---+ * The <<<--negotiate>>> option enables SPNEGO in <<>>. @@ -68,7 +68,7 @@ Hello Alfredo! +---+ ... -URL url = new URL("http://localhost:8080/alfredo/kerberos/who"); +URL url = new URL("http://localhost:8080/hadoop-auth/kerberos/who"); AuthenticatedURL.Token token = new AuthenticatedURL.Token(); ... HttpURLConnection conn = new AuthenticatedURL(url, token).openConnection(); @@ -79,12 +79,12 @@ conn = new AuthenticatedURL(url, token).openConnection(); * Building and Running the Examples - Download Alfredo's source code, the examples are in the + Download Hadoop-Auth's source code, the examples are in the <<>> directory. 
** Server Example: - Edit the <<>> and set the + Edit the <<>> and set the right configuration init parameters for the <<>> definition configured for Kerberos (the right Kerberos principal and keytab file must be specified). Refer to the {{{./Configuration.html}Configuration @@ -106,11 +106,11 @@ conn = new AuthenticatedURL(url, token).openConnection(); $ kinit Please enter the password for tucu@LOCALHOST: -$ curl http://localhost:8080/alfredo-examples/anonymous/who +$ curl http://localhost:8080/hadoop-auth-examples/anonymous/who -$ curl http://localhost:8080/alfredo-examples/simple/who?user.name=foo +$ curl http://localhost:8080/hadoop-auth-examples/simple/who?user.name=foo -$ curl --negotiate -u foo -b ~/cookiejar.txt -c ~/cookiejar.txt http://localhost:8080/alfredo-examples/kerberos/who +$ curl --negotiate -u foo -b ~/cookiejar.txt -c ~/cookiejar.txt http://localhost:8080/hadoop-auth-examples/kerberos/who +---+ ** Accessing the server using the Java client example @@ -121,7 +121,7 @@ Please enter the password for tucu@LOCALHOST: $ cd examples -$ mvn exec:java -Durl=http://localhost:8080/alfredo-examples/kerberos/who +$ mvn exec:java -Durl=http://localhost:8080/hadoop-auth-examples/kerberos/who .... diff --git a/hadoop-common-project/hadoop-auth/src/site/apt/index.apt.vm b/hadoop-common-project/hadoop-auth/src/site/apt/index.apt.vm index d070ff92b2..a2e7b5e915 100644 --- a/hadoop-common-project/hadoop-auth/src/site/apt/index.apt.vm +++ b/hadoop-common-project/hadoop-auth/src/site/apt/index.apt.vm @@ -11,27 +11,27 @@ ~~ limitations under the License. See accompanying LICENSE file. --- - Hadoop Alfredo, Java HTTP SPNEGO ${project.version} + Hadoop Auth, Java HTTP SPNEGO ${project.version} --- --- ${maven.build.timestamp} -Hadoop Alfredo, Java HTTP SPNEGO ${project.version} +Hadoop Auth, Java HTTP SPNEGO ${project.version} - Hadoop Alfredo is a Java library consisting of a client and a server + Hadoop Auth is a Java library consisting of a client and a server components to enable Kerberos SPNEGO authentication for HTTP. - Alfredo also supports additional authentication mechanisms on the client + Hadoop Auth also supports additional authentication mechanisms on the client and the server side via 2 simple interfaces. * License - Alfredo is distributed under {{{http://www.apache.org/licenses/}Apache + Hadoop Auth is distributed under {{{http://www.apache.org/licenses/}Apache License 2.0}}. -* How Does Alfredo Works? +* How Does Auth Works? - Alfredo enforces authentication on protected resources, once authentiation + Hadoop Auth enforces authentication on protected resources, once authentiation has been established it sets a signed HTTP Cookie that contains an authentication token with the user name, user principal, authentication type and expiration time. diff --git a/hadoop-common-project/hadoop-auth/src/site/site.xml b/hadoop-common-project/hadoop-auth/src/site/site.xml index 483581dc9f..4fab0f0e1d 100644 --- a/hadoop-common-project/hadoop-auth/src/site/site.xml +++ b/hadoop-common-project/hadoop-auth/src/site/site.xml @@ -11,7 +11,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/KerberosTestUtils.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java similarity index 97% rename from hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/KerberosTestUtils.java rename to hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java index ae720dbb79..92e1de5a26 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/KerberosTestUtils.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java @@ -11,7 +11,7 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. */ -package org.apache.hadoop.alfredo; +package org.apache.hadoop.security.authentication; import com.sun.security.auth.module.Krb5LoginModule; @@ -34,7 +34,7 @@ * Test helper class for Java Kerberos setup. */ public class KerberosTestUtils { - private static final String PREFIX = "alfredo.test."; + private static final String PREFIX = "hadoop-auth.test."; public static final String REALM = PREFIX + "kerberos.realm"; diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/AuthenticatorTestCase.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java similarity index 96% rename from hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/AuthenticatorTestCase.java rename to hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java index c139fa5902..93c519808f 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/AuthenticatorTestCase.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java @@ -11,9 +11,9 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. 
*/ -package org.apache.hadoop.alfredo.client; +package org.apache.hadoop.security.authentication.client; -import org.apache.hadoop.alfredo.server.AuthenticationFilter; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; import junit.framework.TestCase; import org.mortbay.jetty.Server; import org.mortbay.jetty.servlet.Context; @@ -57,6 +57,7 @@ protected Properties getConfiguration(String configPrefix, FilterConfig filterCo } } + @SuppressWarnings("serial") public static class TestServlet extends HttpServlet { @Override diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/TestAuthenticatedURL.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java similarity index 98% rename from hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/TestAuthenticatedURL.java rename to hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java index f082fadfc8..525af62606 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/TestAuthenticatedURL.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java @@ -11,7 +11,7 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. */ -package org.apache.hadoop.alfredo.client; +package org.apache.hadoop.security.authentication.client; import junit.framework.TestCase; import org.mockito.Mockito; diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/TestKerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java similarity index 88% rename from hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/TestKerberosAuthenticator.java rename to hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java index 2fdb9bc253..f086870ee1 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/TestKerberosAuthenticator.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java @@ -11,12 +11,12 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. 
*/ -package org.apache.hadoop.alfredo.client; +package org.apache.hadoop.security.authentication.client; -import org.apache.hadoop.alfredo.KerberosTestUtils; -import org.apache.hadoop.alfredo.server.AuthenticationFilter; -import org.apache.hadoop.alfredo.server.PseudoAuthenticationHandler; -import org.apache.hadoop.alfredo.server.KerberosAuthenticationHandler; +import org.apache.hadoop.security.authentication.KerberosTestUtils; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; +import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler; +import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler; import java.net.HttpURLConnection; import java.net.URL; diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/TestPseudoAuthenticator.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java similarity index 93% rename from hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/TestPseudoAuthenticator.java rename to hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java index 5d151c2337..807052e848 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/client/TestPseudoAuthenticator.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java @@ -11,10 +11,10 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. */ -package org.apache.hadoop.alfredo.client; +package org.apache.hadoop.security.authentication.client; -import org.apache.hadoop.alfredo.server.AuthenticationFilter; -import org.apache.hadoop.alfredo.server.PseudoAuthenticationHandler; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; +import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler; import java.net.HttpURLConnection; import java.net.URL; diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestAuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java similarity index 98% rename from hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestAuthenticationFilter.java rename to hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java index e450a5603f..415600e97e 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestAuthenticationFilter.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java @@ -11,11 +11,11 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. 
*/ -package org.apache.hadoop.alfredo.server; +package org.apache.hadoop.security.authentication.server; -import org.apache.hadoop.alfredo.client.AuthenticatedURL; -import org.apache.hadoop.alfredo.client.AuthenticationException; -import org.apache.hadoop.alfredo.util.Signer; +import org.apache.hadoop.security.authentication.client.AuthenticatedURL; +import org.apache.hadoop.security.authentication.client.AuthenticationException; +import org.apache.hadoop.security.authentication.util.Signer; import junit.framework.TestCase; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestAuthenticationToken.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationToken.java similarity index 96% rename from hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestAuthenticationToken.java rename to hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationToken.java index 1c29a3364d..25f9100217 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestAuthenticationToken.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationToken.java @@ -11,9 +11,9 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. */ -package org.apache.hadoop.alfredo.server; +package org.apache.hadoop.security.authentication.server; -import org.apache.hadoop.alfredo.client.AuthenticationException; +import org.apache.hadoop.security.authentication.client.AuthenticationException; import junit.framework.TestCase; public class TestAuthenticationToken extends TestCase { diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestKerberosAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java similarity index 95% rename from hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestKerberosAuthenticationHandler.java rename to hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java index 3089d1a659..8187c9ec66 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestKerberosAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java @@ -11,11 +11,11 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. 
*/ -package org.apache.hadoop.alfredo.server; +package org.apache.hadoop.security.authentication.server; -import org.apache.hadoop.alfredo.KerberosTestUtils; -import org.apache.hadoop.alfredo.client.AuthenticationException; -import org.apache.hadoop.alfredo.client.KerberosAuthenticator; +import org.apache.hadoop.security.authentication.KerberosTestUtils; +import org.apache.hadoop.security.authentication.client.AuthenticationException; +import org.apache.hadoop.security.authentication.client.KerberosAuthenticator; import junit.framework.TestCase; import org.apache.commons.codec.binary.Base64; import org.ietf.jgss.GSSContext; diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestPseudoAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java similarity index 94% rename from hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestPseudoAuthenticationHandler.java rename to hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java index 3a05bd435d..dbc2c36833 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/server/TestPseudoAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java @@ -11,11 +11,11 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. */ -package org.apache.hadoop.alfredo.server; +package org.apache.hadoop.security.authentication.server; -import org.apache.hadoop.alfredo.client.AuthenticationException; +import org.apache.hadoop.security.authentication.client.AuthenticationException; import junit.framework.TestCase; -import org.apache.hadoop.alfredo.client.PseudoAuthenticator; +import org.apache.hadoop.security.authentication.client.PseudoAuthenticator; import org.mockito.Mockito; import javax.servlet.http.HttpServletRequest; diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/util/TestKerberosName.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java similarity index 95% rename from hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/util/TestKerberosName.java rename to hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java index 16a15aa647..b6c0b0fb2e 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/util/TestKerberosName.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java @@ -1,4 +1,4 @@ -package org.apache.hadoop.alfredo.util; +package org.apache.hadoop.security.authentication.util; /** * Licensed to the Apache Software Foundation (ASF) under one @@ -20,7 +20,7 @@ import java.io.IOException; -import org.apache.hadoop.alfredo.KerberosTestUtils; +import org.apache.hadoop.security.authentication.KerberosTestUtils; import org.junit.Before; import org.junit.Test; import static org.junit.Assert.*; diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/util/TestSigner.java 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java similarity index 97% rename from hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/util/TestSigner.java rename to hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java index c0236ba7c4..9b3d1a2a2a 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/alfredo/util/TestSigner.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java @@ -11,7 +11,7 @@ * See the License for the specific language governing permissions and * limitations under the License. See accompanying LICENSE file. */ -package org.apache.hadoop.alfredo.util; +package org.apache.hadoop.security.authentication.util; import junit.framework.TestCase; diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 73fc8f82a8..d62fa2db3a 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -2,6 +2,16 @@ Hadoop Change Log Trunk (unreleased changes) + IMPROVEMENTS + + HADOOP-7595. Upgrade dependency to Avro 1.5.3. (Alejandro Abdelnur via atm) + HADOOP-7524 Change RPC to allow multiple protocols including multuple versions of the same protocol (sanjay Radia) + + BUGS + + HADOOP-7606. Upgrade Jackson to version 1.7.1 to match the version required + by Jersey (Alejandro Abdelnur via atm) + Release 0.23.0 - Unreleased INCOMPATIBLE CHANGES @@ -347,6 +357,20 @@ Release 0.23.0 - Unreleased HADOOP-7547. Add generic type in WritableComparable subclasses. (Uma Maheswara Rao G via szetszwo) + HADOOP-7579. Rename package names from alfredo to auth. + (Alejandro Abdelnur via szetszwo) + + HADOOP-7594. Support HTTP REST in HttpServer. (szetszwo) + + HADOOP-7552. FileUtil#fullyDelete doesn't throw IOE but lists it + in the throws clause. (eli) + + HADOOP-7580. Add a version of getLocalPathForWrite to LocalDirAllocator + which doesn't create dirs. (Chris Douglas & Siddharth Seth via acmurthy) + + HADOOP-7507. Allow ganglia metrics to include the metrics system tags + in the gmetric names. (Alejandro Abdelnur via todd) + OPTIMIZATIONS HADOOP-7333. Performance improvement in PureJavaCrc32. (Eric Caspole @@ -533,6 +557,12 @@ Release 0.23.0 - Unreleased HADOOP-7560. Change src layout to be heirarchical. (Alejandro Abdelnur via acmurthy) + HADOOP-7576. Fix findbugs warnings and javac warnings in hadoop-auth. + (szetszwo) + + HADOOP-7593. Fix AssertionError in TestHttpServer.testMaxThreads(). 
+ (Uma Maheswara Rao G via szetszwo) + Release 0.22.0 - Unreleased INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 1dbc2a908f..6c1c00edac 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -92,6 +92,28 @@ jetty-util compile + + + asm + asm + compile + + + com.sun.jersey + jersey-core + compile + + + com.sun.jersey + jersey-json + compile + + + com.sun.jersey + jersey-server + compile + + tomcat jasper-compiler @@ -239,7 +261,7 @@ org.apache.hadoop - hadoop-alfredo + hadoop-auth compile diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties index a6e2f0b16f..f2826b69a6 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties +++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties @@ -43,6 +43,16 @@ #*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both #*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40 +# Tag values to use for the ganglia prefix. If not defined no tags are used. +# If '*' all tags are used. If specifiying multiple tags separate them with +# commas. Note that the last segment of the property name is the context name. +# +#*.sink.ganglia.tagsForPrefix.jvm=ProcesName +#*.sink.ganglia.tagsForPrefix.dfs= +#*.sink.ganglia.tagsForPrefix.rpc= +#*.sink.ganglia.tagsForPrefix.mapred= +#*.sink.ganglia.tagsForPrefix.fairscheduler= + #namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649 #datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649 diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties index e3338c783a..ede0c93480 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties +++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties @@ -149,3 +149,25 @@ log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false #log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd #log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout #log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n + +# +# Yarn ResourceManager Application Summary Log +# +# Set the ResourceManager summary log filename +#yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log +# Set the ResourceManager summary log level and appender +#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY + +# Appender for ResourceManager Application Summary Log - rolled daily +# Requires the following properties to be set +# - hadoop.log.dir (Hadoop Log directory) +# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename) +# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender) + +#log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger} +#log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false +#log4j.appender.RMSUMMARY=org.apache.log4j.DailyRollingFileAppender +#log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file} +#log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout 
+#log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n +#log4j.appender.RMSUMMARY.DatePattern=.yyyy-MM-dd diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java index 023ec69cdb..8e7aa302a6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java @@ -28,7 +28,6 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell.ShellCommandExecutor; @@ -88,7 +87,7 @@ public static Path[] stat2Paths(FileStatus[] stats, Path path) { * (4) If dir is a normal directory, then dir and all its contents recursively * are deleted. */ - public static boolean fullyDelete(File dir) throws IOException { + public static boolean fullyDelete(File dir) { if (dir.delete()) { // dir is (a) normal file, (b) symlink to a file, (c) empty directory or // (d) symlink to a directory @@ -108,7 +107,7 @@ public static boolean fullyDelete(File dir) throws IOException { * If dir is a symlink to a directory, all the contents of the actual * directory pointed to by dir will be deleted. */ - public static boolean fullyDeleteContents(File dir) throws IOException { + public static boolean fullyDeleteContents(File dir) { boolean deletionSucceeded = true; File contents[] = dir.listFiles(); if (contents != null) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java index 3753b2b9a3..71c8235757 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java @@ -128,8 +128,26 @@ public Path getLocalPathForWrite(String pathStr, */ public Path getLocalPathForWrite(String pathStr, long size, Configuration conf) throws IOException { + return getLocalPathForWrite(pathStr, size, conf, true); + } + + /** Get a path from the local FS. Pass size as + * SIZE_UNKNOWN if not known apriori. We + * round-robin over the set of disks (via the configured dirs) and return + * the first complete path which has enough space + * @param pathStr the requested path (this will be created on the first + * available disk) + * @param size the size of the file that is going to be written + * @param conf the Configuration object + * @param checkWrite ensure that the path is writable + * @return the complete path to the file on a local disk + * @throws IOException + */ + public Path getLocalPathForWrite(String pathStr, long size, + Configuration conf, + boolean checkWrite) throws IOException { AllocatorPerContext context = obtainContext(contextCfgItemName); - return context.getLocalPathForWrite(pathStr, size, conf); + return context.getLocalPathForWrite(pathStr, size, conf, checkWrite); } /** Get a path from the local FS for reading. 
We search through all the @@ -145,6 +163,23 @@ public Path getLocalPathToRead(String pathStr, AllocatorPerContext context = obtainContext(contextCfgItemName); return context.getLocalPathToRead(pathStr, conf); } + + /** + * Get all of the paths that currently exist in the working directories. + * @param pathStr the path underneath the roots + * @param conf the configuration to look up the roots in + * @return all of the paths that exist under any of the roots + * @throws IOException + */ + public Iterable getAllLocalPathsToRead(String pathStr, + Configuration conf + ) throws IOException { + AllocatorPerContext context; + synchronized (this) { + context = obtainContext(contextCfgItemName); + } + return context.getAllLocalPathsToRead(pathStr, conf); + } /** Creates a temporary file in the local FS. Pass size as -1 if not known * apriori. We round-robin over the set of disks (via the configured dirs) @@ -214,7 +249,8 @@ public AllocatorPerContext(String contextCfgItemName) { /** This method gets called everytime before any read/write to make sure * that any change to localDirs is reflected immediately. */ - private void confChanged(Configuration conf) throws IOException { + private synchronized void confChanged(Configuration conf) + throws IOException { String newLocalDirs = conf.get(contextCfgItemName); if (!newLocalDirs.equals(savedLocalDirs)) { localDirs = conf.getTrimmedStrings(contextCfgItemName); @@ -251,18 +287,22 @@ private void confChanged(Configuration conf) throws IOException { } } - private Path createPath(String path) throws IOException { + private Path createPath(String path, + boolean checkWrite) throws IOException { Path file = new Path(new Path(localDirs[dirNumLastAccessed]), path); - //check whether we are able to create a directory here. If the disk - //happens to be RDONLY we will fail - try { - DiskChecker.checkDir(new File(file.getParent().toUri().getPath())); - return file; - } catch (DiskErrorException d) { - LOG.warn("Disk Error Exception: ", d); - return null; + if (checkWrite) { + //check whether we are able to create a directory here. If the disk + //happens to be RDONLY we will fail + try { + DiskChecker.checkDir(new File(file.getParent().toUri().getPath())); + return file; + } catch (DiskErrorException d) { + LOG.warn("Disk Error Exception: ", d); + return null; + } } + return file; } /** @@ -272,17 +312,6 @@ private Path createPath(String path) throws IOException { int getCurrentDirectoryIndex() { return dirNumLastAccessed; } - - /** Get a path from the local FS. This method should be used if the size of - * the file is not known a priori. - * - * It will use roulette selection, picking directories - * with probability proportional to their available space. - */ - public synchronized Path getLocalPathForWrite(String path, - Configuration conf) throws IOException { - return getLocalPathForWrite(path, SIZE_UNKNOWN, conf); - } /** Get a path from the local FS. If size is known, we go * round-robin over the set of disks (via the configured dirs) and return @@ -292,7 +321,7 @@ public synchronized Path getLocalPathForWrite(String path, * with probability proportional to their available space. 
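A minimal usage sketch of the new checkWrite flag, assuming a hypothetical context property test.local.dirs and placeholder paths:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;

public class LocalPathForWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical context property naming the local root directories.
    conf.set("test.local.dirs", "/tmp/local1,/tmp/local2");
    LocalDirAllocator alloc = new LocalDirAllocator("test.local.dirs");

    // Existing behaviour: the chosen disk is probed with DiskChecker.checkDir,
    // which may create the parent directory as a side effect.
    Path created = alloc.getLocalPathForWrite("job_1/spill0.out", 1024L, conf);

    // New overload: checkWrite=false skips the probe, so the round-robin
    // choice is returned without touching the disk.
    Path planned = alloc.getLocalPathForWrite("job_1/spill0.out", 1024L, conf, false);

    System.out.println(created + " vs " + planned);
  }
}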
*/ public synchronized Path getLocalPathForWrite(String pathStr, long size, - Configuration conf) throws IOException { + Configuration conf, boolean checkWrite) throws IOException { confChanged(conf); int numDirs = localDirs.length; int numDirsSearched = 0; @@ -324,7 +353,7 @@ public synchronized Path getLocalPathForWrite(String pathStr, long size, dir++; } dirNumLastAccessed = dir; - returnPath = createPath(pathStr); + returnPath = createPath(pathStr, checkWrite); if (returnPath == null) { totalAvailable -= availableOnDisk[dir]; availableOnDisk[dir] = 0; // skip this disk @@ -335,7 +364,7 @@ public synchronized Path getLocalPathForWrite(String pathStr, long size, while (numDirsSearched < numDirs && returnPath == null) { long capacity = dirDF[dirNumLastAccessed].getAvailable(); if (capacity > size) { - returnPath = createPath(pathStr); + returnPath = createPath(pathStr, checkWrite); } dirNumLastAccessed++; dirNumLastAccessed = dirNumLastAccessed % numDirs; @@ -361,7 +390,7 @@ public File createTmpFileForWrite(String pathStr, long size, Configuration conf) throws IOException { // find an appropriate directory - Path path = getLocalPathForWrite(pathStr, size, conf); + Path path = getLocalPathForWrite(pathStr, size, conf, true); File dir = new File(path.getParent().toUri().getPath()); String prefix = path.getName(); @@ -398,6 +427,74 @@ public synchronized Path getLocalPathToRead(String pathStr, " the configured local directories"); } + private static class PathIterator implements Iterator, Iterable { + private final FileSystem fs; + private final String pathStr; + private int i = 0; + private final String[] rootDirs; + private Path next = null; + + private PathIterator(FileSystem fs, String pathStr, String[] rootDirs) + throws IOException { + this.fs = fs; + this.pathStr = pathStr; + this.rootDirs = rootDirs; + advance(); + } + + @Override + public boolean hasNext() { + return next != null; + } + + private void advance() throws IOException { + while (i < rootDirs.length) { + next = new Path(rootDirs[i++], pathStr); + if (fs.exists(next)) { + return; + } + } + next = null; + } + + @Override + public Path next() { + Path result = next; + try { + advance(); + } catch (IOException ie) { + throw new RuntimeException("Can't check existance of " + next, ie); + } + return result; + } + + @Override + public void remove() { + throw new UnsupportedOperationException("read only iterator"); + } + + @Override + public Iterator iterator() { + return this; + } + } + + /** + * Get all of the paths that currently exist in the working directories. 
+ * @param pathStr the path underneath the roots + * @param conf the configuration to look up the roots in + * @return all of the paths that exist under any of the roots + * @throws IOException + */ + synchronized Iterable getAllLocalPathsToRead(String pathStr, + Configuration conf) throws IOException { + confChanged(conf); + if (pathStr.startsWith("/")) { + pathStr = pathStr.substring(1); + } + return new PathIterator(localFS, pathStr, localDirs); + } + /** We search through all the configured dirs for the file's existence * and return true when we find one */ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java index 63579980cd..fe40d8bc4b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java @@ -228,10 +228,10 @@ public void write(int b) throws IOException { public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException { if (!exists(f)) { - throw new FileNotFoundException("File " + f + " not found."); + throw new FileNotFoundException("File " + f + " not found"); } if (getFileStatus(f).isDirectory()) { - throw new IOException("Cannot append to a diretory (=" + f + " )."); + throw new IOException("Cannot append to a diretory (=" + f + " )"); } return new FSDataOutputStream(new BufferedOutputStream( new LocalFSFileOutputStream(f, true), bufferSize), statistics); @@ -242,7 +242,7 @@ public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException { if (exists(f) && !overwrite) { - throw new IOException("File already exists:"+f); + throw new IOException("File already exists: "+f); } Path parent = f.getParent(); if (parent != null && !mkdirs(parent)) { @@ -271,11 +271,18 @@ public boolean rename(Path src, Path dst) throws IOException { return FileUtil.copy(this, src, this, dst, true, getConf()); } + /** + * Delete the given path to a file or directory. 
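A minimal sketch of iterating the new getAllLocalPathsToRead, which is backed by the lazy PathIterator above and (in the full signature) returns an Iterable of Path; the test.local.dirs context property and paths are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;

public class AllLocalPathsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("test.local.dirs", "/tmp/local1,/tmp/local2"); // placeholder roots
    LocalDirAllocator alloc = new LocalDirAllocator("test.local.dirs");

    // PathIterator walks the configured roots lazily and only yields
    // root/pathStr combinations that actually exist on the local FS.
    for (Path p : alloc.getAllLocalPathsToRead("job_1/spill0.out", conf)) {
      System.out.println("found: " + p);
    }
  }
}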
+ * @param p the path to delete + * @param recursive to delete sub-directories + * @return true if the file or directory and all its contents were deleted + * @throws IOException if p is non-empty and recursive is false + */ public boolean delete(Path p, boolean recursive) throws IOException { File f = pathToFile(p); if (f.isFile()) { return f.delete(); - } else if ((!recursive) && f.isDirectory() && + } else if (!recursive && f.isDirectory() && (FileUtil.listFiles(f).length != 0)) { throw new IOException("Directory " + f.toString() + " is not empty"); } @@ -287,7 +294,7 @@ public FileStatus[] listStatus(Path f) throws IOException { FileStatus[] results; if (!localf.exists()) { - throw new FileNotFoundException("File " + f + " does not exist."); + throw new FileNotFoundException("File " + f + " does not exist"); } if (localf.isFile()) { return new FileStatus[] { @@ -421,7 +428,7 @@ public FileStatus getFileStatus(Path f) throws IOException { if (path.exists()) { return new RawLocalFileStatus(pathToFile(f), getDefaultBlockSize(), this); } else { - throw new FileNotFoundException("File " + f + " does not exist."); + throw new FileNotFoundException("File " + f + " does not exist"); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java index d5deb7df92..00cdf32746 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java @@ -48,16 +48,12 @@ import org.apache.hadoop.conf.ConfServlet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.http.AdminAuthorizedServlet; -import org.apache.hadoop.http.FilterContainer; -import org.apache.hadoop.http.FilterInitializer; -import org.apache.hadoop.http.HtmlQuoting; import org.apache.hadoop.jmx.JMXJsonServlet; import org.apache.hadoop.log.LogLevel; import org.apache.hadoop.metrics.MetricsServlet; import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector.MODE; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.util.ReflectionUtils; import org.mortbay.io.Buffer; @@ -79,6 +75,8 @@ import org.mortbay.thread.QueuedThreadPool; import org.mortbay.util.MultiException; +import com.sun.jersey.spi.container.servlet.ServletContainer; + /** * Create a Jetty embedded server to answer http requests. The primary goal * is to serve up status information for the server. @@ -178,7 +176,7 @@ public HttpServer(String name, String bindAddress, int port, int maxThreads = conf.getInt(HTTP_MAX_THREADS, -1); // If HTTP_MAX_THREADS is not configured, QueueThreadPool() will use the - // default value (currently 254). + // default value (currently 250). QueuedThreadPool threadPool = maxThreads == -1 ? new QueuedThreadPool() : new QueuedThreadPool(maxThreads); webServer.setThreadPool(threadPool); @@ -325,6 +323,22 @@ public void setAttribute(String name, Object value) { webAppContext.setAttribute(name, value); } + /** + * Add a Jersey resource package. + * @param packageName The Java package name containing the Jersey resource. 
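The delete contract documented above is easy to trip over; a small sketch, assuming placeholder paths under /tmp:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DeleteContractSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration()).getRawFileSystem();
    Path dir = new Path("/tmp/delete-demo");                // placeholder
    fs.mkdirs(new Path(dir, "child"));
    try {
      fs.delete(dir, false);                                // non-empty, recursive=false
    } catch (java.io.IOException expected) {
      System.out.println("as documented: " + expected.getMessage());
    }
    System.out.println("deleted: " + fs.delete(dir, true)); // recursive delete succeeds
  }
}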
+ * @param pathSpec The path spec for the servlet + */ + public void addJerseyResourcePackage(final String packageName, + final String pathSpec) { + LOG.info("addJerseyResourcePackage: packageName=" + packageName + + ", pathSpec=" + pathSpec); + final ServletHolder sh = new ServletHolder(ServletContainer.class); + sh.setInitParameter("com.sun.jersey.config.property.resourceConfigClass", + "com.sun.jersey.api.core.PackagesResourceConfig"); + sh.setInitParameter("com.sun.jersey.config.property.packages", packageName); + webAppContext.addServlet(sh, pathSpec); + } + /** * Add a servlet in the server. * @param name The name of the servlet (can be passed as null) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index 9c35e8e9c4..c339ce7eaa 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -285,8 +285,8 @@ public Connection(ConnectionId remoteId) throws IOException { authMethod = AuthMethod.KERBEROS; } - header = new ConnectionHeader(protocol == null ? null : protocol - .getName(), ticket, authMethod); + header = + new ConnectionHeader(RPC.getProtocolName(protocol), ticket, authMethod); if (LOG.isDebugEnabled()) LOG.debug("Use " + authMethod + " authentication for protocol " diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolInfo.java new file mode 100644 index 0000000000..924fa8b150 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolInfo.java @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ipc; + +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; + + +/** + * The protocol name that is used when a client and server connect. + * By default the class name of the protocol interface is the protocol name. + * + * Why override the default name (i.e. the class name)? + * One use case overriding the default name (i.e. the class name) is when + * there are multiple implementations of the same protocol, each with say a + * different version/serialization. + * In Hadoop this is used to allow multiple server and client adapters + * for different versions of the same protocol service. + */ +@Retention(RetentionPolicy.RUNTIME) +public @interface ProtocolInfo { + String protocolName(); // the name of the protocol (i.e. 
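A sketch of how a Jersey resource might be wired through addJerseyResourcePackage, assuming an invented com.example.rest package and StatusResource class:

// Hypothetical JAX-RS resource; package and class names are invented.
package com.example.rest;

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;

@Path("/status")
public class StatusResource {
  @GET
  @Produces("application/json")
  public String status() {
    return "{\"state\":\"alive\"}";
  }
}

// Registered during server start-up, e.g.:
//   httpServer.addJerseyResourcePackage("com.example.rest", "/rest/*");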
rpc service) +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java index b42b9133f5..453a5dd175 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java @@ -62,6 +62,20 @@ */ public class RPC { static final Log LOG = LogFactory.getLog(RPC.class); + + + /** + * Get the protocol name. + * If the protocol class has a ProtocolAnnotation, then get the protocol + * name from the annotation; otherwise the class name is the protocol name. + */ + static public String getProtocolName(Class protocol) { + if (protocol == null) { + return null; + } + ProtocolInfo anno = (ProtocolInfo) protocol.getAnnotation(ProtocolInfo.class); + return (anno == null) ? protocol.getName() : anno.protocolName(); + } private RPC() {} // no public ctor @@ -553,8 +567,10 @@ public static Server getServer(Class protocol, } /** Construct a server for a protocol implementation instance. */ - public static Server getServer(Class protocol, - Object instance, String bindAddress, int port, + + public static + Server getServer(Class protocol, + IMPL instance, String bindAddress, int port, int numHandlers, int numReaders, int queueSizePerHandler, boolean verbose, Configuration conf, SecretManager secretManager) @@ -576,6 +592,18 @@ protected Server(String bindAddress, int port, super(bindAddress, port, paramClass, handlerCount, numReaders, queueSizePerHandler, conf, serverName, secretManager); } + + /** + * Add a protocol to the existing server. + * @param protocolClass - the protocol class + * @param protocolImpl - the impl of the protocol that will be called + * @return the server (for convenience) + */ + public + Server addProtocol(Class protocolClass, IMPL protocolImpl + ) throws IOException { + throw new IOException("addProtocol Not Implemented"); + } } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index 96ec07929f..0bfc5722f4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -900,7 +900,7 @@ public class Connection { private InetAddress addr; ConnectionHeader header = new ConnectionHeader(); - Class protocol; + String protocolName; boolean useSasl; SaslServer saslServer; private AuthMethod authMethod; @@ -1287,15 +1287,8 @@ private void processHeader(byte[] buf) throws IOException { DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf)); header.readFields(in); - try { - String protocolClassName = header.getProtocol(); - if (protocolClassName != null) { - protocol = getProtocolClass(header.getProtocol(), conf); - rpcDetailedMetrics.init(protocol); - } - } catch (ClassNotFoundException cnfe) { - throw new IOException("Unknown protocol: " + header.getProtocol()); - } + protocolName = header.getProtocol(); + UserGroupInformation protocolUser = header.getUgi(); if (!useSasl) { @@ -1484,7 +1477,7 @@ public void run() { // Make the call as the user via Subject.doAs, thus associating // the call with the Subject if (call.connection.user == null) { - value = call(call.connection.protocol, call.param, + value = call(call.connection.protocolName, call.param, call.timestamp); } else { value = @@ 
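A sketch of how @ProtocolInfo interacts with RPC.getProtocolName, assuming invented FooProtocol interfaces:

import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.VersionedProtocol;

// Without an annotation the protocol name defaults to the interface's class name.
interface FooProtocol extends VersionedProtocol {
  long versionID = 1L;
  String echo(String msg) throws java.io.IOException;
}

// With @ProtocolInfo, differently versioned interfaces can share one wire-level
// name, which is what allows multiple adapters for the same protocol service.
@ProtocolInfo(protocolName = "org.example.FooProtocol")
interface FooProtocolV2 extends VersionedProtocol {
  long versionID = 2L;
  String echo(String msg, boolean upperCase) throws java.io.IOException;
}

class ProtocolNameSketch {
  public static void main(String[] args) {
    System.out.println(RPC.getProtocolName(FooProtocol.class));   // the class name
    System.out.println(RPC.getProtocolName(FooProtocolV2.class)); // "org.example.FooProtocol"
  }
}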
-1493,7 +1486,7 @@ public void run() { @Override public Writable run() throws Exception { // make the call - return call(call.connection.protocol, + return call(call.connection.protocolName, call.param, call.timestamp); } @@ -1753,7 +1746,7 @@ public synchronized InetSocketAddress getListenerAddress() { /** * Called for each call. - * @deprecated Use {@link #call(Class, Writable, long)} instead + * @deprecated Use {@link #call(String, Writable, long)} instead */ @Deprecated public Writable call(Writable param, long receiveTime) throws IOException { @@ -1761,7 +1754,7 @@ public Writable call(Writable param, long receiveTime) throws IOException { } /** Called for each call. */ - public abstract Writable call(Class protocol, + public abstract Writable call(String protocol, Writable param, long receiveTime) throws IOException; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/VersionedProtocol.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/VersionedProtocol.java index 4558f2150d..4d02027a0e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/VersionedProtocol.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/VersionedProtocol.java @@ -34,7 +34,6 @@ public interface VersionedProtocol { * @return the version that the server will speak * @throws IOException if any IO error occurs */ - @Deprecated public long getProtocolVersion(String protocol, long clientVersion) throws IOException; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java index e587913923..b28949d99a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java @@ -27,6 +27,9 @@ import java.net.InetSocketAddress; import java.io.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; import java.util.Map; import java.util.HashMap; @@ -35,6 +38,7 @@ import org.apache.commons.logging.*; import org.apache.hadoop.io.*; +import org.apache.hadoop.ipc.VersionedProtocol; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.TokenIdentifier; @@ -47,10 +51,46 @@ public class WritableRpcEngine implements RpcEngine { private static final Log LOG = LogFactory.getLog(RPC.class); + + /** + * Get all superInterfaces that extend VersionedProtocol + * @param childInterfaces + * @return the super interfaces that extend VersionedProtocol + */ + private static Class[] getSuperInterfaces(Class[] childInterfaces) { + List> allInterfaces = new ArrayList>(); + + for (Class childInterface : childInterfaces) { + if (VersionedProtocol.class.isAssignableFrom(childInterface)) { + allInterfaces.add(childInterface); + allInterfaces.addAll( + Arrays.asList( + getSuperInterfaces(childInterface.getInterfaces()))); + } else { + LOG.warn("Interface " + childInterface + + " ignored because it does not extend VersionedProtocol"); + } + } + return (Class[]) allInterfaces.toArray(new Class[allInterfaces.size()]); + } + + /** + * Get all interfaces that the given protocol implements or extends + * which are assignable from VersionedProtocol. 
+ */ + private static Class[] getProtocolInterfaces(Class protocol) { + Class[] interfaces = protocol.getInterfaces(); + return getSuperInterfaces(interfaces); + } + + //writableRpcVersion should be updated if there is a change //in format of the rpc messages. - public static long writableRpcVersion = 1L; + + // 2L - added declared class to Invocation + public static final long writableRpcVersion = 2L; + /** A method invocation, including the method name and its parameters.*/ private static class Invocation implements Writable, Configurable { private String methodName; @@ -59,11 +99,13 @@ private static class Invocation implements Writable, Configurable { private Configuration conf; private long clientVersion; private int clientMethodsHash; + private String declaringClassProtocolName; //This could be different from static writableRpcVersion when received //at server, if client is using a different version. private long rpcVersion; + @SuppressWarnings("unused") // called when deserializing an invocation public Invocation() {} public Invocation(Method method, Object[] parameters) { @@ -88,6 +130,8 @@ public Invocation(Method method, Object[] parameters) { this.clientMethodsHash = ProtocolSignature.getFingerprint(method .getDeclaringClass().getMethods()); } + this.declaringClassProtocolName = + RPC.getProtocolName(method.getDeclaringClass()); } /** The name of the method invoked. */ @@ -103,6 +147,7 @@ private long getProtocolVersion() { return clientVersion; } + @SuppressWarnings("unused") private int getClientMethodsHash() { return clientMethodsHash; } @@ -115,8 +160,10 @@ public long getRpcVersion() { return rpcVersion; } + @SuppressWarnings("deprecation") public void readFields(DataInput in) throws IOException { rpcVersion = in.readLong(); + declaringClassProtocolName = UTF8.readString(in); methodName = UTF8.readString(in); clientVersion = in.readLong(); clientMethodsHash = in.readInt(); @@ -124,13 +171,16 @@ public void readFields(DataInput in) throws IOException { parameterClasses = new Class[parameters.length]; ObjectWritable objectWritable = new ObjectWritable(); for (int i = 0; i < parameters.length; i++) { - parameters[i] = ObjectWritable.readObject(in, objectWritable, this.conf); + parameters[i] = + ObjectWritable.readObject(in, objectWritable, this.conf); parameterClasses[i] = objectWritable.getDeclaredClass(); } } + @SuppressWarnings("deprecation") public void write(DataOutput out) throws IOException { out.writeLong(rpcVersion); + UTF8.writeString(out, declaringClassProtocolName); UTF8.writeString(out, methodName); out.writeLong(clientVersion); out.writeInt(clientMethodsHash); @@ -273,30 +323,161 @@ public Object[] call(Method method, Object[][] params, /** Construct a server for a protocol implementation instance listening on a * port and address. */ - public Server getServer(Class protocol, - Object instance, String bindAddress, int port, - int numHandlers, int numReaders, int queueSizePerHandler, - boolean verbose, Configuration conf, + public RPC.Server getServer(Class protocolClass, + Object protocolImpl, String bindAddress, int port, + int numHandlers, int numReaders, int queueSizePerHandler, + boolean verbose, Configuration conf, SecretManager secretManager) throws IOException { - return new Server(instance, conf, bindAddress, port, numHandlers, - numReaders, queueSizePerHandler, verbose, secretManager); + return new Server(protocolClass, protocolImpl, conf, bindAddress, port, + numHandlers, numReaders, queueSizePerHandler, verbose, secretManager); } + /** An RPC Server. 
*/ public static class Server extends RPC.Server { - private Object instance; private boolean verbose; + + /** + * The key in Map + */ + static class ProtoNameVer { + final String protocol; + final long version; + ProtoNameVer(String protocol, long ver) { + this.protocol = protocol; + this.version = ver; + } + @Override + public boolean equals(Object o) { + if (o == null) + return false; + if (this == o) + return true; + if (! (o instanceof ProtoNameVer)) + return false; + ProtoNameVer pv = (ProtoNameVer) o; + return ((pv.protocol.equals(this.protocol)) && + (pv.version == this.version)); + } + @Override + public int hashCode() { + return protocol.hashCode() * 37 + (int) version; + } + } + + /** + * The value in map + */ + static class ProtoClassProtoImpl { + final Class protocolClass; + final Object protocolImpl; + ProtoClassProtoImpl(Class protocolClass, Object protocolImpl) { + this.protocolClass = protocolClass; + this.protocolImpl = protocolImpl; + } + } + + private Map protocolImplMap = + new HashMap(10); + + // Register protocol and its impl for rpc calls + private void registerProtocolAndImpl(Class protocolClass, + Object protocolImpl) throws IOException { + String protocolName = RPC.getProtocolName(protocolClass); + VersionedProtocol vp = (VersionedProtocol) protocolImpl; + long version; + try { + version = vp.getProtocolVersion(protocolName, 0); + } catch (Exception ex) { + LOG.warn("Protocol " + protocolClass + + " NOT registered as getProtocolVersion throws exception "); + return; + } + protocolImplMap.put(new ProtoNameVer(protocolName, version), + new ProtoClassProtoImpl(protocolClass, protocolImpl)); + LOG.info("ProtocolImpl=" + protocolImpl.getClass().getName() + + " protocolClass=" + protocolClass.getName() + " version=" + version); + } + + private static class VerProtocolImpl { + final long version; + final ProtoClassProtoImpl protocolTarget; + VerProtocolImpl(long ver, ProtoClassProtoImpl protocolTarget) { + this.version = ver; + this.protocolTarget = protocolTarget; + } + } + + + @SuppressWarnings("unused") // will be useful later. + private VerProtocolImpl[] getSupportedProtocolVersions( + String protocolName) { + VerProtocolImpl[] resultk = new VerProtocolImpl[protocolImplMap.size()]; + int i = 0; + for (Map.Entry pv : + protocolImplMap.entrySet()) { + if (pv.getKey().protocol.equals(protocolName)) { + resultk[i++] = + new VerProtocolImpl(pv.getKey().version, pv.getValue()); + } + } + if (i == 0) { + return null; + } + VerProtocolImpl[] result = new VerProtocolImpl[i]; + System.arraycopy(resultk, 0, result, 0, i); + return result; + } + + private VerProtocolImpl getHighestSupportedProtocol(String protocolName) { + Long highestVersion = 0L; + ProtoClassProtoImpl highest = null; + for (Map.Entry pv : protocolImplMap + .entrySet()) { + if (pv.getKey().protocol.equals(protocolName)) { + if ((highest == null) || (pv.getKey().version > highestVersion)) { + highest = pv.getValue(); + highestVersion = pv.getKey().version; + } + } + } + if (highest == null) { + return null; + } + return new VerProtocolImpl(highestVersion, highest); + } + /** Construct an RPC server. 
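A stripped-down, self-contained model of the (protocol name, version) registry above, to make the exact-match and highest-version rules concrete; the names and values are invented:

import java.util.HashMap;
import java.util.Map;

class ProtocolRegistrySketch {
  // Mirrors ProtoNameVer: two keys are equal when both name and version match.
  static final class Key {
    final String name; final long version;
    Key(String name, long version) { this.name = name; this.version = version; }
    @Override public boolean equals(Object o) {
      if (!(o instanceof Key)) return false;
      Key k = (Key) o;
      return k.name.equals(name) && k.version == version;
    }
    @Override public int hashCode() { return name.hashCode() * 37 + (int) version; }
  }

  private final Map<Key, Object> impls = new HashMap<Key, Object>();

  void register(String name, long version, Object impl) {
    impls.put(new Key(name, version), impl);
  }

  /** Exact (name, version) match, as done for ordinary RPC calls. */
  Object exact(String name, long clientVersion) {
    return impls.get(new Key(name, clientVersion));
  }

  /** Highest registered version of a protocol name; used for VersionedProtocol
   *  calls and when reporting a version mismatch back to the client. */
  Object highest(String name) {
    long best = Long.MIN_VALUE; Object bestImpl = null;
    for (Map.Entry<Key, Object> e : impls.entrySet()) {
      if (e.getKey().name.equals(name) && e.getKey().version > best) {
        best = e.getKey().version; bestImpl = e.getValue();
      }
    }
    return bestImpl;  // null means the protocol name is not registered at all
  }

  public static void main(String[] args) {
    ProtocolRegistrySketch reg = new ProtocolRegistrySketch();
    reg.register("org.example.FooProtocol", 1L, "v1 impl");
    reg.register("org.example.FooProtocol", 2L, "v2 impl");
    System.out.println(reg.exact("org.example.FooProtocol", 1L));  // v1 impl
    System.out.println(reg.exact("org.example.FooProtocol", 3L));  // null -> version mismatch
    System.out.println(reg.highest("org.example.FooProtocol"));    // v2 impl
  }
}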
* @param instance the instance whose methods will be called * @param conf the configuration to use * @param bindAddress the address to bind on to listen for connection * @param port the port to listen for connections on + * + * @deprecated Use #Server(Class, Object, Configuration, String, int) + * */ - public Server(Object instance, Configuration conf, String bindAddress, int port) + @Deprecated + public Server(Object instance, Configuration conf, String bindAddress, + int port) throws IOException { - this(instance, conf, bindAddress, port, 1, -1, -1, false, null); + this(null, instance, conf, bindAddress, port); + } + + + /** Construct an RPC server. + * @param protocol class + * @param instance the instance whose methods will be called + * @param conf the configuration to use + * @param bindAddress the address to bind on to listen for connection + * @param port the port to listen for connections on + */ + public Server(Class protocolClass, Object protocolImpl, + Configuration conf, String bindAddress, int port) + throws IOException { + this(protocolClass, protocolImpl, conf, bindAddress, port, 1, -1, -1, + false, null); } private static String classNameBase(String className) { @@ -307,35 +488,103 @@ private static String classNameBase(String className) { return names[names.length-1]; } + /** Construct an RPC server. - * @param instance the instance whose methods will be called + * @param protocolImpl the instance whose methods will be called + * @param conf the configuration to use + * @param bindAddress the address to bind on to listen for connection + * @param port the port to listen for connections on + * @param numHandlers the number of method handler threads to run + * @param verbose whether each call should be logged + * + * @deprecated use Server#Server(Class, Object, + * Configuration, String, int, int, int, int, boolean, SecretManager) + */ + @Deprecated + public Server(Object protocolImpl, Configuration conf, String bindAddress, + int port, int numHandlers, int numReaders, int queueSizePerHandler, + boolean verbose, SecretManager secretManager) + throws IOException { + this(null, protocolImpl, conf, bindAddress, port, + numHandlers, numReaders, queueSizePerHandler, verbose, + secretManager); + + } + + /** Construct an RPC server. 
+ * @param protocolClass - the protocol being registered + * can be null for compatibility with old usage (see below for details) + * @param protocolImpl the protocol impl that will be called * @param conf the configuration to use * @param bindAddress the address to bind on to listen for connection * @param port the port to listen for connections on * @param numHandlers the number of method handler threads to run * @param verbose whether each call should be logged */ - public Server(Object instance, Configuration conf, String bindAddress, int port, - int numHandlers, int numReaders, int queueSizePerHandler, boolean verbose, - SecretManager secretManager) + public Server(Class protocolClass, Object protocolImpl, + Configuration conf, String bindAddress, int port, + int numHandlers, int numReaders, int queueSizePerHandler, + boolean verbose, SecretManager secretManager) throws IOException { super(bindAddress, port, Invocation.class, numHandlers, numReaders, queueSizePerHandler, conf, - classNameBase(instance.getClass().getName()), secretManager); - this.instance = instance; + classNameBase(protocolImpl.getClass().getName()), secretManager); + this.verbose = verbose; + + + Class[] protocols; + if (protocolClass == null) { // derive protocol from impl + /* + * In order to remain compatible with the old usage where a single + * target protocolImpl is suppled for all protocol interfaces, and + * the protocolImpl is derived from the protocolClass(es) + * we register all interfaces extended by the protocolImpl + */ + protocols = getProtocolInterfaces(protocolImpl.getClass()); + + } else { + if (!protocolClass.isAssignableFrom(protocolImpl.getClass())) { + throw new IOException("protocolClass "+ protocolClass + + " is not implemented by protocolImpl which is of class " + + protocolImpl.getClass()); + } + // register protocol class and its super interfaces + registerProtocolAndImpl(protocolClass, protocolImpl); + protocols = getProtocolInterfaces(protocolClass); + } + for (Class p : protocols) { + if (!p.equals(VersionedProtocol.class)) { + registerProtocolAndImpl(p, protocolImpl); + } + } + } - public Writable call(Class protocol, Writable param, long receivedTime) + + @Override + public Server + addProtocol( + Class protocolClass, IMPL protocolImpl) throws IOException { + registerProtocolAndImpl(protocolClass, protocolImpl); + return this; + } + + /** + * Process a client call + * @param protocolName - the protocol name (the class of the client proxy + * used to make calls to the rpc server. + * @param param parameters + * @param receivedTime time at which the call receoved (for metrics) + * @return the call's return + * @throws IOException + */ + public Writable call(String protocolName, Writable param, long receivedTime) throws IOException { try { Invocation call = (Invocation)param; if (verbose) log("Call: " + call); - Method method = protocol.getMethod(call.getMethodName(), - call.getParameterClasses()); - method.setAccessible(true); - // Verify rpc version if (call.getRpcVersion() != writableRpcVersion) { // Client is using a different version of WritableRpc @@ -344,25 +593,51 @@ public Writable call(Class protocol, Writable param, long receivedTime) + call.getRpcVersion() + ", server side version=" + writableRpcVersion); } - - //Verify protocol version. 
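Putting it together, a server could expose two protocols on one port roughly as below, assuming hypothetical FooProtocol/BarProtocol interfaces that extend VersionedProtocol and matching impl classes; the bind address, port, and handler counts are placeholders:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;

class MultiProtocolServerSketch {
  // FooProtocol, BarProtocol and their impl classes are assumed to exist elsewhere.
  static RPC.Server start(Configuration conf) throws IOException {
    RPC.Server server = RPC.getServer(FooProtocol.class, new FooProtocolImpl(),
        "0.0.0.0", 9000, 5 /*handlers*/, -1, -1, false /*verbose*/, conf,
        null /*no secret manager*/);
    // New with this change: register a second protocol on the same server.
    server.addProtocol(BarProtocol.class, new BarProtocolImpl());
    server.start();
    return server;
  }
}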
- //Bypass the version check for VersionedProtocol - if (!method.getDeclaringClass().equals(VersionedProtocol.class)) { - long clientVersion = call.getProtocolVersion(); - ProtocolSignature serverInfo = ((VersionedProtocol) instance) - .getProtocolSignature(protocol.getCanonicalName(), call - .getProtocolVersion(), call.getClientMethodsHash()); - long serverVersion = serverInfo.getVersion(); - if (serverVersion != clientVersion) { - LOG.warn("Version mismatch: client version=" + clientVersion - + ", server version=" + serverVersion); - throw new RPC.VersionMismatch(protocol.getName(), clientVersion, - serverVersion); + + long clientVersion = call.getProtocolVersion(); + final String protoName; + ProtoClassProtoImpl protocolImpl; + if (call.declaringClassProtocolName.equals(VersionedProtocol.class.getName())) { + // VersionProtocol methods are often used by client to figure out + // which version of protocol to use. + // + // Versioned protocol methods should go the protocolName protocol + // rather than the declaring class of the method since the + // the declaring class is VersionedProtocol which is not + // registered directly. + // Send the call to the highest protocol version + protocolImpl = + getHighestSupportedProtocol(protocolName).protocolTarget; + } else { + protoName = call.declaringClassProtocolName; + + // Find the right impl for the protocol based on client version. + ProtoNameVer pv = + new ProtoNameVer(call.declaringClassProtocolName, clientVersion); + protocolImpl = protocolImplMap.get(pv); + if (protocolImpl == null) { // no match for Protocol AND Version + VerProtocolImpl highest = + getHighestSupportedProtocol(protoName); + if (highest == null) { + throw new IOException("Unknown protocol: " + protoName); + } else { // protocol supported but not the version that client wants + throw new RPC.VersionMismatch(protoName, clientVersion, + highest.version); + } } } + + + // Invoke the protocol method long startTime = System.currentTimeMillis(); - Object value = method.invoke(instance, call.getParameters()); + Method method = + protocolImpl.protocolClass.getMethod(call.getMethodName(), + call.getParameterClasses()); + method.setAccessible(true); + rpcDetailedMetrics.init(protocolImpl.protocolClass); + Object value = + method.invoke(protocolImpl.protocolImpl, call.getParameters()); int processingTime = (int) (System.currentTimeMillis() - startTime); int qTime = (int) (startTime-receivedTime); if (LOG.isDebugEnabled()) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java index 6460120012..18dc7a0da7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java @@ -132,6 +132,12 @@ public void emitRecord(String contextName, String recordName, StringBuilder sb = new StringBuilder(); sb.append(contextName); sb.append('.'); + + if (contextName.equals("jvm") && outRec.getTag("processName") != null) { + sb.append(outRec.getTag("processName")); + sb.append('.'); + } + sb.append(recordName); sb.append('.'); int sbBaseLen = sb.length(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink30.java 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink30.java index 8d90101f2c..37f91c9da9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink30.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink30.java @@ -20,13 +20,21 @@ import java.io.IOException; import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; import java.util.Map; +import java.util.Set; +import org.apache.commons.configuration.SubsetConfiguration; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsException; import org.apache.hadoop.metrics2.MetricsRecord; +import org.apache.hadoop.metrics2.MetricsTag; +import org.apache.hadoop.metrics2.impl.MsInfo; import org.apache.hadoop.metrics2.util.MetricsCache; import org.apache.hadoop.metrics2.util.MetricsCache.Record; @@ -38,8 +46,67 @@ public class GangliaSink30 extends AbstractGangliaSink { public final Log LOG = LogFactory.getLog(this.getClass()); + private static final String TAGS_FOR_PREFIX_PROPERTY_PREFIX = "tagsForPrefix."; + private MetricsCache metricsCache = new MetricsCache(); + // a key with a NULL value means ALL + private Map> useTagsMap = new HashMap>(); + + @Override + @SuppressWarnings("unchecked") + public void init(SubsetConfiguration conf) { + super.init(conf); + + conf.setListDelimiter(','); + Iterator it = (Iterator) conf.getKeys(); + while (it.hasNext()) { + String propertyName = it.next(); + if (propertyName.startsWith(TAGS_FOR_PREFIX_PROPERTY_PREFIX)) { + String contextName = propertyName.substring(TAGS_FOR_PREFIX_PROPERTY_PREFIX.length()); + String[] tags = conf.getStringArray(propertyName); + boolean useAllTags = false; + Set set = null; + if (tags.length > 0) { + set = new HashSet(); + for (String tag : tags) { + tag = tag.trim(); + useAllTags |= tag.equals("*"); + if (tag.length() > 0) { + set.add(tag); + } + } + if (useAllTags) { + set = null; + } + } + useTagsMap.put(contextName, set); + } + } + } + + @InterfaceAudience.Private + public void appendPrefix(MetricsRecord record, StringBuilder sb) { + String contextName = record.context(); + Collection tags = record.tags(); + if (useTagsMap.containsKey(contextName)) { + Set useTags = useTagsMap.get(contextName); + for (MetricsTag t : tags) { + if (useTags == null || useTags.contains(t.name())) { + + // the context is always skipped here because it is always added + + // the hostname is always skipped to avoid case-mismatches + // from different DNSes. 
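To make tagsForPrefix concrete: with *.sink.ganglia.tagsForPrefix.jvm=ProcessName, a jvm-context record tagged ProcessName=NameNode would be grouped under a name like jvm.metrics.ProcessName=NameNode. A simplified, self-contained model of that naming (not the sink's actual code, which also skips the Context and Hostname tags):

import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;

class GangliaPrefixSketch {
  /** Append "name=value" for each tag selected for the record's context. */
  static String groupName(String context, String record,
                          Map<String, String> tags, Set<String> tagsForPrefix) {
    StringBuilder sb = new StringBuilder(context).append('.').append(record);
    for (Map.Entry<String, String> t : tags.entrySet()) {
      if (tagsForPrefix == null || tagsForPrefix.contains(t.getKey())) {
        sb.append('.').append(t.getKey()).append('=').append(t.getValue());
      }
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    Map<String, String> tags = new LinkedHashMap<String, String>();
    tags.put("ProcessName", "NameNode");
    Set<String> selected = new HashSet<String>();
    selected.add("ProcessName");
    // Prints: jvm.metrics.ProcessName=NameNode
    System.out.println(groupName("jvm", "metrics", tags, selected));
  }
}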
+ + if (t.info() != MsInfo.Context && t.info() != MsInfo.Hostname && t.value() != null) { + sb.append('.').append(t.name()).append('=').append(t.value()); + } + } + } + } + } + @Override public void putMetrics(MetricsRecord record) { // The method handles both cases whether Ganglia support dense publish @@ -53,6 +120,8 @@ public void putMetrics(MetricsRecord record) { sb.append('.'); sb.append(recordName); + appendPrefix(record, sb); + String groupName = sb.toString(); sb.append('.'); int sbBaseLen = sb.length(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java index 7f983f3e3d..cd6ab7b326 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.security; -import org.apache.hadoop.alfredo.server.AuthenticationFilter; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.FilterContainer; import org.apache.hadoop.http.FilterInitializer; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/HadoopKerberosName.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/HadoopKerberosName.java index 35e8d39d6d..36f1943f50 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/HadoopKerberosName.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/HadoopKerberosName.java @@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.alfredo.util.KerberosName; +import org.apache.hadoop.security.authentication.util.KerberosName; import sun.security.krb5.Config; import sun.security.krb5.KrbException; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java index cbba1c62e3..46993e16aa 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java @@ -158,10 +158,7 @@ public static void main(String[] args) throws Throwable { Runtime.getRuntime().addShutdownHook(new Thread() { public void run() { - try { - FileUtil.fullyDelete(workDir); - } catch (IOException e) { - } + FileUtil.fullyDelete(workDir); } }); diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index 8fc6017f6d..d4b4030559 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -317,6 +317,11 @@ org.apache.hadoop.hdfs.HsftpFileSystem + + fs.webhdfs.impl + org.apache.hadoop.hdfs.web.WebHdfsFileSystem + + fs.ftp.impl org.apache.hadoop.fs.ftp.FTPFileSystem diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java index 9205f640ec..6b3963b41a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java @@ -32,6 +32,7 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.mortbay.log.Log; import static org.apache.hadoop.fs.FileSystemTestHelper.*; @@ -62,8 +63,6 @@ public abstract class FSMainOperationsBaseTest { private static String TEST_DIR_AXX = "test/hadoop/axx"; private static int numBlocks = 2; - static final String LOCAL_FS_ROOT_URI = "file:///tmp/test"; - protected static FileSystem fSys; @@ -83,7 +82,7 @@ public boolean accept(Path file) { } }; - private static byte[] data = getFileData(numBlocks, + protected static final byte[] data = getFileData(numBlocks, getDefaultBlockSize()); @Before @@ -183,7 +182,7 @@ public void testWorkingDirectory() throws Exception { @Test public void testWDAbsolute() throws IOException { - Path absoluteDir = new Path(LOCAL_FS_ROOT_URI + "/existingDir"); + Path absoluteDir = new Path(fSys.getUri() + "/test/existingDir"); fSys.mkdirs(absoluteDir); fSys.setWorkingDirectory(absoluteDir); Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory()); @@ -646,7 +645,7 @@ public void testWriteReadAndDeleteTwoBlocks() throws Exception { writeReadAndDelete(getDefaultBlockSize() * 2); } - private void writeReadAndDelete(int len) throws IOException { + protected void writeReadAndDelete(int len) throws IOException { Path path = getTestRootPath(fSys, "test/hadoop/file"); fSys.mkdirs(path.getParent()); @@ -768,6 +767,7 @@ public void testRenameNonExistentPath() throws Exception { rename(src, dst, false, false, false, Rename.NONE); Assert.fail("Should throw FileNotFoundException"); } catch (IOException e) { + Log.info("XXX", e); Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java index 8b37f2aa6a..94c4b0c31f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java @@ -45,7 +45,7 @@ public abstract class FileSystemContractBaseTest extends TestCase { protected FileSystem fs; - private byte[] data = new byte[getBlockSize() * 2]; // two blocks of data + protected byte[] data = new byte[getBlockSize() * 2]; // two blocks of data { for (int i = 0; i < data.length; i++) { data[i] = (byte) (i % 10); @@ -215,7 +215,7 @@ public void testWriteReadAndDeleteTwoBlocks() throws Exception { writeReadAndDelete(getBlockSize() * 2); } - private void writeReadAndDelete(int len) throws IOException { + protected void writeReadAndDelete(int len) throws IOException { Path path = path("/test/hadoop/file"); fs.mkdirs(path.getParent()); @@ -256,7 +256,7 @@ public void testOverwrite() throws IOException { assertEquals("Length", data.length, fs.getFileStatus(path).getLen()); try { - fs.create(path, false); + fs.create(path, false).close(); fail("Should throw IOException."); } catch (IOException e) { // Expected diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java index b827ca4378..b2a9e16038 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java @@ -17,16 +17,15 @@ */ package org.apache.hadoop.fs; -import java.io.DataInputStream; import java.io.IOException; import java.io.FileNotFoundException; import java.net.URI; +import java.util.Random; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.io.IOUtils; import org.junit.Assert; - +import static org.junit.Assert.*; /** * Helper class for unit tests. @@ -143,23 +142,33 @@ public static boolean isDir(FileSystem fSys, Path p) throws IOException { } } - - public static void writeFile(FileSystem fSys, Path path,byte b[]) - throws Exception { - FSDataOutputStream out = - fSys.create(path); - out.write(b); - out.close(); + static String writeFile(FileSystem fileSys, Path name, int fileSize) + throws IOException { + final long seed = 0xDEADBEEFL; + // Create and write a file that contains three blocks of data + FSDataOutputStream stm = fileSys.create(name); + byte[] buffer = new byte[fileSize]; + Random rand = new Random(seed); + rand.nextBytes(buffer); + stm.write(buffer); + stm.close(); + return new String(buffer); } - public static byte[] readFile(FileSystem fSys, Path path, int len ) - throws Exception { - DataInputStream dis = fSys.open(path); - byte[] buffer = new byte[len]; - IOUtils.readFully(dis, buffer, 0, len); - dis.close(); - return buffer; + static String readFile(FileSystem fs, Path name, int buflen) + throws IOException { + byte[] b = new byte[buflen]; + int offset = 0; + FSDataInputStream in = fs.open(name); + for (int remaining, n; + (remaining = b.length - offset) > 0 && (n = in.read(b, offset, remaining)) != -1; + offset += n); + assertEquals(offset, Math.min(b.length, in.getPos())); + in.close(); + String s = new String(b, 0, offset); + return s; } + public static FileStatus containsPath(FileSystem fSys, Path path, FileStatus[] dirList) throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestChecksumFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestChecksumFileSystem.java index 8ca095a648..373bdf12d5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestChecksumFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestChecksumFileSystem.java @@ -18,10 +18,9 @@ package org.apache.hadoop.fs; -import java.net.URI; - import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FSDataOutputStream; +import static org.apache.hadoop.fs.FileSystemTestHelper.*; import org.apache.hadoop.conf.Configuration; import junit.framework.TestCase; @@ -56,13 +55,13 @@ public void testVerifyChecksum() throws Exception { // Exercise some boundary cases - a divisor of the chunk size // the chunk size, 2x chunk size, and +/-1 around these. 
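A minimal round-trip sketch of the reworked helpers, assuming a hypothetical test class placed in the org.apache.hadoop.fs package (the helpers are package-private) and a placeholder test path:

package org.apache.hadoop.fs;

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.conf.Configuration;
import org.junit.Test;

public class HelperRoundTripSketch {
  @Test
  public void roundTrip() throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path p = new Path(System.getProperty("test.build.data", "/tmp"), "helper-demo");
    String written = FileSystemTestHelper.writeFile(fs, p, 1024); // 1 KB of random data
    String read = FileSystemTestHelper.readFile(fs, p, 2048);     // buffer larger than the file
    assertEquals(written, read);  // readFile stops at EOF, so the contents match
    fs.delete(p, false);
  }
}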
- TestLocalFileSystem.readFile(localFs, testPath, 128); - TestLocalFileSystem.readFile(localFs, testPath, 511); - TestLocalFileSystem.readFile(localFs, testPath, 512); - TestLocalFileSystem.readFile(localFs, testPath, 513); - TestLocalFileSystem.readFile(localFs, testPath, 1023); - TestLocalFileSystem.readFile(localFs, testPath, 1024); - TestLocalFileSystem.readFile(localFs, testPath, 1025); + readFile(localFs, testPath, 128); + readFile(localFs, testPath, 511); + readFile(localFs, testPath, 512); + readFile(localFs, testPath, 513); + readFile(localFs, testPath, 1023); + readFile(localFs, testPath, 1024); + readFile(localFs, testPath, 1025); localFs.delete(localFs.getChecksumFile(testPath), true); assertTrue("checksum deleted", !localFs.exists(localFs.getChecksumFile(testPath))); @@ -74,7 +73,7 @@ public void testVerifyChecksum() throws Exception { boolean errorRead = false; try { - TestLocalFileSystem.readFile(localFs, testPath, 1024); + readFile(localFs, testPath, 1024); }catch(ChecksumException ie) { errorRead = true; } @@ -83,7 +82,7 @@ public void testVerifyChecksum() throws Exception { //now setting verify false, the read should succeed try { localFs.setVerifyChecksum(false); - String str = TestLocalFileSystem.readFile(localFs, testPath, 1024); + String str = readFile(localFs, testPath, 1024).toString(); assertTrue("read", "testing".equals(str)); } finally { // reset for other tests @@ -104,13 +103,13 @@ public void testMultiChunkFile() throws Exception { // Exercise some boundary cases - a divisor of the chunk size // the chunk size, 2x chunk size, and +/-1 around these. - TestLocalFileSystem.readFile(localFs, testPath, 128); - TestLocalFileSystem.readFile(localFs, testPath, 511); - TestLocalFileSystem.readFile(localFs, testPath, 512); - TestLocalFileSystem.readFile(localFs, testPath, 513); - TestLocalFileSystem.readFile(localFs, testPath, 1023); - TestLocalFileSystem.readFile(localFs, testPath, 1024); - TestLocalFileSystem.readFile(localFs, testPath, 1025); + readFile(localFs, testPath, 128); + readFile(localFs, testPath, 511); + readFile(localFs, testPath, 512); + readFile(localFs, testPath, 513); + readFile(localFs, testPath, 1023); + readFile(localFs, testPath, 1024); + readFile(localFs, testPath, 1025); } /** @@ -140,7 +139,7 @@ public void testTruncatedChecksum() throws Exception { // Now reading the file should fail with a ChecksumException try { - TestLocalFileSystem.readFile(localFs, testPath, 1024); + readFile(localFs, testPath, 1024); fail("Did not throw a ChecksumException when reading truncated " + "crc file"); } catch(ChecksumException ie) { @@ -149,7 +148,7 @@ public void testTruncatedChecksum() throws Exception { // telling it not to verify checksums, should avoid issue. 
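// ---------------------------------------------------------------------------------------
// Illustrative aside, not part of the patch: the verify-checksum toggle that the
// surrounding TestChecksumFileSystem cases rely on. ChecksumFileSystem keeps a sidecar
// ".crc" file per data file, verifies it on every read, and throws ChecksumException when
// the two disagree; setVerifyChecksum(false) bypasses the check so the raw bytes can still
// be read. This is a sketch under assumptions: the class name and test path are made up,
// and the corruption step (overwriting the data through the raw file system so the stored
// checksum goes stale) is one possible way to trigger the mismatch, not the exact mechanism
// the real tests use.
package org.apache.hadoop.fs;

import static org.apache.hadoop.fs.FileSystemTestHelper.readFile;

import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;

public class ChecksumToggleSketch extends TestCase {
  public void testReadWithStaleCrc() throws Exception {
    LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
    Path p = new Path("build/test/data/checksum-demo");

    FSDataOutputStream out = localFs.create(p);   // writes the data and its .crc sidecar
    out.writeBytes("testing");
    out.close();

    // Rewrite the data through the raw (non-checksummed) file system; the old .crc
    // now describes bytes that are no longer on disk.
    FSDataOutputStream raw = localFs.getRawFileSystem().create(p, true);
    raw.writeBytes("TESTING");
    raw.close();

    try {
      readFile(localFs, p, 1024);                 // verification is on: checksum mismatch
      fail("expected a ChecksumException");
    } catch (ChecksumException expected) {
    }

    try {
      localFs.setVerifyChecksum(false);           // skip verification ...
      assertEquals("TESTING", readFile(localFs, p, 1024));  // ... and the raw bytes come back
    } finally {
      localFs.setVerifyChecksum(true);            // reset so later tests still verify
    }
  }
}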
try { localFs.setVerifyChecksum(false); - String str = TestLocalFileSystem.readFile(localFs, testPath, 1024); + String str = readFile(localFs, testPath, 1024).toString(); assertTrue("read", "testing truncation".equals(str)); } finally { // reset for other tests diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java index f6cfa1c7ca..ffb1dcf1f1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java @@ -29,7 +29,7 @@ public class TestDU extends TestCase { final static private File DU_DIR = new File( System.getProperty("test.build.data","/tmp"), "dutmp"); - public void setUp() throws IOException { + public void setUp() { FileUtil.fullyDelete(DU_DIR); assertTrue(DU_DIR.mkdirs()); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHardLink.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHardLink.java index 6c7ac2e82c..ff1d099438 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHardLink.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHardLink.java @@ -98,7 +98,7 @@ public class TestHardLink { * @throws IOException */ @BeforeClass - public static void setupClean() throws IOException { + public static void setupClean() { //delete source and target directories if they exist FileUtil.fullyDelete(src); FileUtil.fullyDelete(tgt_one); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java index eef90308aa..1e22a73bba 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs; import java.io.File; +import java.io.FileNotFoundException; import java.io.IOException; import org.apache.hadoop.conf.Configuration; @@ -208,4 +209,33 @@ public void test4() throws Exception { } } + /** Two buffer dirs. The first dir does not exist & is on a read-only disk; + * The second dir exists & is RW + * getLocalPathForWrite with checkAccess set to false should create a parent + * directory. With checkAccess true, the directory should not be created. 
+ * @throws Exception + */ + public void testLocalPathForWriteDirCreation() throws IOException { + try { + conf.set(CONTEXT, BUFFER_DIR[0] + "," + BUFFER_DIR[1]); + assertTrue(localFs.mkdirs(BUFFER_PATH[1])); + BUFFER_ROOT.setReadOnly(); + Path p1 = + dirAllocator.getLocalPathForWrite("p1/x", SMALL_FILE_SIZE, conf); + assertTrue(localFs.getFileStatus(p1.getParent()).isDirectory()); + + Path p2 = + dirAllocator.getLocalPathForWrite("p2/x", SMALL_FILE_SIZE, conf, + false); + try { + localFs.getFileStatus(p2.getParent()); + } catch (Exception e) { + assertEquals(e.getClass(), FileNotFoundException.class); + } + } finally { + Shell.execCommand(new String[] { "chmod", "u+w", BUFFER_DIR_ROOT }); + rmBufferDirs(); + } + } + } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java index 45e0f4a338..6ccc201c55 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java @@ -18,37 +18,23 @@ package org.apache.hadoop.fs; import org.apache.hadoop.conf.Configuration; +import static org.apache.hadoop.fs.FileSystemTestHelper.*; + import java.io.*; -import junit.framework.*; + +import static org.junit.Assert.*; +import org.junit.Before; +import org.junit.Test; /** * This class tests the local file system via the FileSystem abstraction. */ -public class TestLocalFileSystem extends TestCase { +public class TestLocalFileSystem { private static String TEST_ROOT_DIR = System.getProperty("test.build.data","build/test/data/work-dir/localfs"); - - static void writeFile(FileSystem fs, Path name) throws IOException { - FSDataOutputStream stm = fs.create(name); - stm.writeBytes("42\n"); - stm.close(); - } - - static String readFile(FileSystem fs, Path name, int buflen) throws IOException { - byte[] b = new byte[buflen]; - int offset = 0; - FSDataInputStream in = fs.open(name); - for(int remaining, n; - (remaining = b.length - offset) > 0 && (n = in.read(b, offset, remaining)) != -1; - offset += n); - assertEquals(offset, Math.min(b.length, in.getPos())); - in.close(); - - String s = new String(b, 0, offset); - System.out.println("s=" + s); - return s; - } + private Configuration conf; + private FileSystem fileSys; private void cleanupFile(FileSystem fs, Path name) throws IOException { assertTrue(fs.exists(name)); @@ -56,12 +42,18 @@ private void cleanupFile(FileSystem fs, Path name) throws IOException { assertTrue(!fs.exists(name)); } + @Before + public void setup() throws IOException { + conf = new Configuration(); + fileSys = FileSystem.getLocal(conf); + fileSys.delete(new Path(TEST_ROOT_DIR), true); + } + /** * Test the capability of setting the working directory. */ + @Test public void testWorkingDirectory() throws IOException { - Configuration conf = new Configuration(); - FileSystem fileSys = FileSystem.getLocal(conf); Path origDir = fileSys.getWorkingDirectory(); Path subdir = new Path(TEST_ROOT_DIR, "new"); try { @@ -85,7 +77,7 @@ public void testWorkingDirectory() throws IOException { // create files and manipulate them. 
Path file1 = new Path("file1"); Path file2 = new Path("sub/file2"); - writeFile(fileSys, file1); + String contents = writeFile(fileSys, file1, 1); fileSys.copyFromLocalFile(file1, file2); assertTrue(fileSys.exists(file1)); assertTrue(fileSys.isFile(file1)); @@ -103,11 +95,10 @@ public void testWorkingDirectory() throws IOException { InputStream stm = fileSys.open(file1); byte[] buffer = new byte[3]; int bytesRead = stm.read(buffer, 0, 3); - assertEquals("42\n", new String(buffer, 0, bytesRead)); + assertEquals(contents, new String(buffer, 0, bytesRead)); stm.close(); } finally { fileSys.setWorkingDirectory(origDir); - fileSys.delete(subdir, true); } } @@ -115,6 +106,7 @@ public void testWorkingDirectory() throws IOException { * test Syncable interface on raw local file system * @throws IOException */ + @Test public void testSyncable() throws IOException { Configuration conf = new Configuration(); FileSystem fs = FileSystem.getLocal(conf).getRawFileSystem(); @@ -148,12 +140,13 @@ private void verifyFile(FileSystem fs, Path file, int bytesToVerify, } } + @Test public void testCopy() throws IOException { Configuration conf = new Configuration(); LocalFileSystem fs = FileSystem.getLocal(conf); Path src = new Path(TEST_ROOT_DIR, "dingo"); Path dst = new Path(TEST_ROOT_DIR, "yak"); - writeFile(fs, src); + writeFile(fs, src, 1); assertTrue(FileUtil.copy(fs, src, fs, dst, true, false, conf)); assertTrue(!fs.exists(src) && fs.exists(dst)); assertTrue(FileUtil.copy(fs, dst, fs, src, false, false, conf)); @@ -170,9 +163,12 @@ public void testCopy() throws IOException { try { FileUtil.copy(fs, dst, fs, src, true, true, conf); fail("Failed to detect existing dir"); - } catch (IOException e) { } + } catch (IOException e) { + // Expected + } } + @Test public void testHomeDirectory() throws IOException { Configuration conf = new Configuration(); FileSystem fileSys = FileSystem.getLocal(conf); @@ -182,16 +178,18 @@ public void testHomeDirectory() throws IOException { assertEquals(home, fsHome); } + @Test public void testPathEscapes() throws IOException { Configuration conf = new Configuration(); FileSystem fs = FileSystem.getLocal(conf); Path path = new Path(TEST_ROOT_DIR, "foo%bar"); - writeFile(fs, path); + writeFile(fs, path, 1); FileStatus status = fs.getFileStatus(path); assertEquals(path.makeQualified(fs), status.getPath()); cleanupFile(fs, path); } + @Test public void testMkdirs() throws IOException { Configuration conf = new Configuration(); LocalFileSystem fs = FileSystem.getLocal(conf); @@ -199,18 +197,40 @@ public void testMkdirs() throws IOException { Path test_file = new Path(TEST_ROOT_DIR, "file1"); assertTrue(fs.mkdirs(test_dir)); - writeFile(fs, test_file); + writeFile(fs, test_file, 1); // creating dir over a file Path bad_dir = new Path(test_file, "another_dir"); try { fs.mkdirs(bad_dir); fail("Failed to detect existing file in path"); - } catch (FileAlreadyExistsException e) { } + } catch (FileAlreadyExistsException e) { + // Expected + } try { fs.mkdirs(null); fail("Failed to detect null in mkdir arg"); - } catch (IllegalArgumentException e) { } + } catch (IllegalArgumentException e) { + // Expected + } + } + + /** Test deleting a file, directory, and non-existent path */ + @Test + public void testBasicDelete() throws IOException { + Configuration conf = new Configuration(); + LocalFileSystem fs = FileSystem.getLocal(conf); + Path dir1 = new Path(TEST_ROOT_DIR, "dir1"); + Path file1 = new Path(TEST_ROOT_DIR, "file1"); + Path file2 = new Path(TEST_ROOT_DIR+"/dir1", "file2"); + Path file3 
= new Path(TEST_ROOT_DIR, "does-not-exist"); + assertTrue(fs.mkdirs(dir1)); + writeFile(fs, file1, 1); + writeFile(fs, file2, 1); + assertFalse("Returned true deleting non-existant path", + fs.delete(file3)); + assertTrue("Did not delete file", fs.delete(file1)); + assertTrue("Did not delete non-empty dir", fs.delete(dir1)); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java index 4149af3c9b..3d739a07d8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java @@ -19,9 +19,9 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.*; +import static org.apache.hadoop.fs.FileSystemTestHelper.*; import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; import java.io.File; import java.io.IOException; import java.io.PrintStream; @@ -42,14 +42,6 @@ public class TestTrash extends TestCase { new Path(new File(System.getProperty("test.build.data","/tmp") ).toURI().toString().replace(' ', '+'), "testTrash"); - protected static Path writeFile(FileSystem fs, Path f) throws IOException { - DataOutputStream out = fs.create(f); - out.writeBytes("dhruba: " + f); - out.close(); - assertTrue(fs.exists(f)); - return f; - } - protected static Path mkdir(FileSystem fs, Path p) throws IOException { assertTrue(fs.mkdirs(p)); assertTrue(fs.exists(p)); @@ -139,7 +131,7 @@ public static void trashShell(final Configuration conf, final Path base, // Second, create a file in that directory. Path myFile = new Path(base, "test/mkdirs/myFile"); - writeFile(fs, myFile); + writeFile(fs, myFile, 10); // Verify that expunge without Trash directory // won't throw Exception @@ -176,7 +168,7 @@ public static void trashShell(final Configuration conf, final Path base, } // Verify that we can recreate the file - writeFile(fs, myFile); + writeFile(fs, myFile, 10); // Verify that we succeed in removing the file we re-created { @@ -194,7 +186,7 @@ public static void trashShell(final Configuration conf, final Path base, } // Verify that we can recreate the file - writeFile(fs, myFile); + writeFile(fs, myFile, 10); // Verify that we succeed in removing the whole directory // along with the file inside it. 
@@ -234,7 +226,7 @@ public static void trashShell(final Configuration conf, final Path base, { Path toErase = new Path(trashRoot, "toErase"); int retVal = -1; - writeFile(trashRootFs, toErase); + writeFile(trashRootFs, toErase, 10); try { retVal = shell.run(new String[] {"-rm", toErase.toString()}); } catch (Exception e) { @@ -265,7 +257,7 @@ public static void trashShell(final Configuration conf, final Path base, // recreate directory and file mkdir(fs, myPath); - writeFile(fs, myFile); + writeFile(fs, myFile, 10); // remove file first, then remove directory { @@ -316,7 +308,7 @@ public static void trashShell(final Configuration conf, final Path base, // recreate directory and file mkdir(fs, myPath); - writeFile(fs, myFile); + writeFile(fs, myFile, 10); // Verify that skip trash option really skips the trash for files (rm) { @@ -346,7 +338,7 @@ public static void trashShell(final Configuration conf, final Path base, // recreate directory and file mkdir(fs, myPath); - writeFile(fs, myFile); + writeFile(fs, myFile, 10); // Verify that skip trash option really skips the trash for rmr { @@ -392,7 +384,7 @@ public static void trashShell(final Configuration conf, final Path base, for(int i=0;i params = request.getParameterMap(); - SortedSet keys = new TreeSet(params.keySet()); + SortedSet keys = new TreeSet(params.keySet()); for(String key: keys) { out.print(key); out.print(':'); @@ -101,7 +101,7 @@ public void doGet(HttpServletRequest request, HttpServletResponse response ) throws ServletException, IOException { PrintWriter out = response.getWriter(); - SortedSet sortedKeys = new TreeSet(); + SortedSet sortedKeys = new TreeSet(); Enumeration keys = request.getParameterNames(); while(keys.hasMoreElements()) { sortedKeys.add(keys.nextElement()); @@ -118,7 +118,6 @@ public void doGet(HttpServletRequest request, @SuppressWarnings("serial") public static class HtmlContentServlet extends HttpServlet { - @SuppressWarnings("unchecked") @Override public void doGet(HttpServletRequest request, HttpServletResponse response @@ -131,10 +130,14 @@ public void doGet(HttpServletRequest request, } @BeforeClass public static void setup() throws Exception { - server = createTestServer(); + Configuration conf = new Configuration(); + conf.setInt(HttpServer.HTTP_MAX_THREADS, 10); + server = createTestServer(conf); server.addServlet("echo", "/echo", EchoServlet.class); server.addServlet("echomap", "/echomap", EchoMapServlet.class); server.addServlet("htmlcontent", "/htmlcontent", HtmlContentServlet.class); + server.addJerseyResourcePackage( + JerseyResource.class.getPackage().getName(), "/jersey/*"); server.start(); baseUrl = getServerURL(server); LOG.info("HTTP server started: "+ baseUrl); @@ -161,7 +164,8 @@ public void run() { assertEquals("a:b\nc:d\n", readOutput(new URL(baseUrl, "/echo?a=b&c=d"))); int serverThreads = server.webServer.getThreadPool().getThreads(); - assertTrue(serverThreads <= MAX_THREADS); + assertTrue("More threads are started than expected, Server Threads count: " + + serverThreads, serverThreads <= MAX_THREADS); System.out.println("Number of threads = " + serverThreads + " which is less or equal than the max = " + MAX_THREADS); } catch (Exception e) { @@ -404,4 +408,18 @@ public void testRequestQuoterWithNotNull() throws Exception { values, parameterValues)); } + @SuppressWarnings("unchecked") + private static Map parse(String jsonString) { + return (Map)JSON.parse(jsonString); + } + + @Test public void testJersey() throws Exception { + LOG.info("BEGIN testJersey()"); + final String js = 
readOutput(new URL(baseUrl, "/jersey/foo?op=bar")); + final Map m = parse(js); + LOG.info("m=" + m); + assertEquals("foo", m.get(JerseyResource.PATH)); + assertEquals("bar", m.get(JerseyResource.OP)); + LOG.info("END testJersey()"); + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/resource/JerseyResource.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/resource/JerseyResource.java new file mode 100644 index 0000000000..f1313e26ca --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/resource/JerseyResource.java @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.http.resource; + +import java.io.IOException; +import java.util.Map; +import java.util.TreeMap; + +import javax.ws.rs.DefaultValue; +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.mortbay.util.ajax.JSON; + +/** + * A simple Jersey resource class TestHttpServer. + * The servlet simply puts the path and the op parameter in a map + * and return it in JSON format in the response. 
+ */ +@Path("") +public class JerseyResource { + static final Log LOG = LogFactory.getLog(JerseyResource.class); + + public static final String PATH = "path"; + public static final String OP = "op"; + + @GET + @Path("{" + PATH + ":.*}") + @Produces({MediaType.APPLICATION_JSON}) + public Response get( + @PathParam(PATH) @DefaultValue("UNKNOWN_" + PATH) final String path, + @QueryParam(OP) @DefaultValue("UNKNOWN_" + OP) final String op + ) throws IOException { + LOG.info("get: " + PATH + "=" + path + ", " + OP + "=" + op); + + final Map m = new TreeMap(); + m.put(PATH, path); + m.put(OP, op); + final String js = JSON.toString(m); + return Response.ok(js).type(MediaType.APPLICATION_JSON).build(); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java index 6c3f5b1b39..51d044bda6 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java @@ -50,7 +50,7 @@ public void checkLoaded() { } @Before - public void setupTestDir() throws IOException { + public void setupTestDir() { FileUtil.fullyDelete(TEST_DIR); TEST_DIR.mkdirs(); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java index cf272026ed..5d04c20023 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java @@ -97,7 +97,7 @@ public TestServer(int handlerCount, boolean sleep, } @Override - public Writable call(Class protocol, Writable param, long receiveTime) + public Writable call(String protocol, Writable param, long receiveTime) throws IOException { if (sleep) { // sleep a bit diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java index 3710198295..d4400effa7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java @@ -72,7 +72,7 @@ public TestServer(final int handlerCount, final boolean sleep) } @Override - public Writable call(Class protocol, Writable param, long receiveTime) + public Writable call(String protocol, Writable param, long receiveTime) throws IOException { if (sleep) { try { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java new file mode 100644 index 0000000000..203c2855bc --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java @@ -0,0 +1,255 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ipc; + +import java.io.IOException; +import java.net.InetSocketAddress; + +import org.junit.Assert; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.net.NetUtils; +import org.junit.Before; +import org.junit.After; +import org.junit.Test; + +public class TestMultipleProtocolServer { + private static final String ADDRESS = "0.0.0.0"; + private static InetSocketAddress addr; + private static RPC.Server server; + + private static Configuration conf = new Configuration(); + + + @ProtocolInfo(protocolName="Foo") + interface Foo0 extends VersionedProtocol { + public static final long versionID = 0L; + String ping() throws IOException; + + } + + @ProtocolInfo(protocolName="Foo") + interface Foo1 extends VersionedProtocol { + public static final long versionID = 1L; + String ping() throws IOException; + String ping2() throws IOException; + } + + @ProtocolInfo(protocolName="Foo") + interface FooUnimplemented extends VersionedProtocol { + public static final long versionID = 2L; + String ping() throws IOException; + } + + interface Mixin extends VersionedProtocol{ + public static final long versionID = 0L; + void hello() throws IOException; + } + interface Bar extends Mixin, VersionedProtocol { + public static final long versionID = 0L; + int echo(int i) throws IOException; + } + + + + class Foo0Impl implements Foo0 { + + @Override + public long getProtocolVersion(String protocol, long clientVersion) + throws IOException { + return Foo0.versionID; + } + + @SuppressWarnings("unchecked") + @Override + public ProtocolSignature getProtocolSignature(String protocol, + long clientVersion, int clientMethodsHash) throws IOException { + Class inter; + try { + inter = (Class)getClass(). + getGenericInterfaces()[0]; + } catch (Exception e) { + throw new IOException(e); + } + return ProtocolSignature.getProtocolSignature(clientMethodsHash, + getProtocolVersion(protocol, clientVersion), inter); + } + + @Override + public String ping() { + return "Foo0"; + } + + } + + class Foo1Impl implements Foo1 { + + @Override + public long getProtocolVersion(String protocol, long clientVersion) + throws IOException { + return Foo1.versionID; + } + + @SuppressWarnings("unchecked") + @Override + public ProtocolSignature getProtocolSignature(String protocol, + long clientVersion, int clientMethodsHash) throws IOException { + Class inter; + try { + inter = (Class)getClass(). 
+ getGenericInterfaces()[0]; + } catch (Exception e) { + throw new IOException(e); + } + return ProtocolSignature.getProtocolSignature(clientMethodsHash, + getProtocolVersion(protocol, clientVersion), inter); + } + + @Override + public String ping() { + return "Foo1"; + } + + @Override + public String ping2() { + return "Foo1"; + + } + + } + + + class BarImpl implements Bar { + + @Override + public long getProtocolVersion(String protocol, long clientVersion) + throws IOException { + return Bar.versionID; + } + + @SuppressWarnings("unchecked") + @Override + public ProtocolSignature getProtocolSignature(String protocol, + long clientVersion, int clientMethodsHash) throws IOException { + Class inter; + try { + inter = (Class)getClass(). + getGenericInterfaces()[0]; + } catch (Exception e) { + throw new IOException(e); + } + return ProtocolSignature.getProtocolSignature(clientMethodsHash, + getProtocolVersion(protocol, clientVersion), inter); + } + + @Override + public int echo(int i) { + return i; + } + + @Override + public void hello() { + + + } + } + @Before + public void setUp() throws Exception { + // create a server with two handlers + server = RPC.getServer(Foo0.class, + new Foo0Impl(), ADDRESS, 0, 2, false, conf, null); + server.addProtocol(Foo1.class, new Foo1Impl()); + server.addProtocol(Bar.class, new BarImpl()); + server.addProtocol(Mixin.class, new BarImpl()); + server.start(); + addr = NetUtils.getConnectAddress(server); + } + + @After + public void tearDown() throws Exception { + server.stop(); + } + + @Test + public void test1() throws IOException { + ProtocolProxy proxy; + proxy = RPC.getProtocolProxy(Foo0.class, Foo0.versionID, addr, conf); + + Foo0 foo0 = (Foo0)proxy.getProxy(); + Assert.assertEquals("Foo0", foo0.ping()); + + + proxy = RPC.getProtocolProxy(Foo1.class, Foo1.versionID, addr, conf); + + + Foo1 foo1 = (Foo1)proxy.getProxy(); + Assert.assertEquals("Foo1", foo1.ping()); + Assert.assertEquals("Foo1", foo1.ping()); + + + proxy = RPC.getProtocolProxy(Bar.class, Foo1.versionID, addr, conf); + + + Bar bar = (Bar)proxy.getProxy(); + Assert.assertEquals(99, bar.echo(99)); + + // Now test Mixin class method + + Mixin mixin = bar; + mixin.hello(); + } + + + // Server does not implement the FooUnimplemented version of protocol Foo. + // See that calls to it fail. + @Test(expected=IOException.class) + public void testNonExistingProtocol() throws IOException { + ProtocolProxy proxy; + proxy = RPC.getProtocolProxy(FooUnimplemented.class, + FooUnimplemented.versionID, addr, conf); + + FooUnimplemented foo = (FooUnimplemented)proxy.getProxy(); + foo.ping(); + } + + + /** + * getProtocolVersion of an unimplemented version should return highest version + * Similarly getProtocolSignature should work. 
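// ---------------------------------------------------------------------------------------
// Condensed recap (illustrative only, not part of the patch): the HADOOP-7524 pattern this
// test exercises. One RPC.Server is created for a primary protocol, further protocols are
// attached with addProtocol() (including another version that shares the @ProtocolInfo name
// "Foo"), and a client selects an implementation by protocol name plus versionID through
// RPC.getProtocolProxy(). The sketch reuses the interfaces and impls defined in this test
// class, so it is assumed to live inside TestMultipleProtocolServer; only the method and
// local variable names are new.
private void multiProtocolRoundTrip() throws IOException {
  RPC.Server s = RPC.getServer(Foo0.class, new Foo0Impl(),   // serves version 0 of "Foo"
      ADDRESS, 0, 2, false, conf, null);
  s.addProtocol(Foo1.class, new Foo1Impl());                 // same name "Foo", version 1
  s.addProtocol(Bar.class, new BarImpl());                   // a second, unrelated protocol
  s.start();
  try {
    InetSocketAddress a = NetUtils.getConnectAddress(s);
    // The client names the protocol (via the interface's @ProtocolInfo) and a versionID;
    // the server dispatches to whichever registered implementation matches both.
    ProtocolProxy proxy = RPC.getProtocolProxy(Foo1.class, Foo1.versionID, a, conf);
    Foo1 foo1 = (Foo1) proxy.getProxy();
    Assert.assertEquals("Foo1", foo1.ping2());
  } finally {
    s.stop();
  }
}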
+ * @throws IOException + */ + @Test + public void testNonExistingProtocol2() throws IOException { + ProtocolProxy proxy; + proxy = RPC.getProtocolProxy(FooUnimplemented.class, + FooUnimplemented.versionID, addr, conf); + + FooUnimplemented foo = (FooUnimplemented)proxy.getProxy(); + Assert.assertEquals(Foo1.versionID, + foo.getProtocolVersion(RPC.getProtocolName(FooUnimplemented.class), + FooUnimplemented.versionID)); + foo.getProtocolSignature(RPC.getProtocolName(FooUnimplemented.class), + FooUnimplemented.versionID, 0); + } + + @Test(expected=IOException.class) + public void testIncorrectServerCreation() throws IOException { + RPC.getServer(Foo1.class, + new Foo0Impl(), ADDRESS, 0, 2, false, conf, null); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java index 02ca2afe42..85e60dde9f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java @@ -39,7 +39,7 @@ public class TestRPCCompatibility { private static final String ADDRESS = "0.0.0.0"; private static InetSocketAddress addr; - private static Server server; + private static RPC.Server server; private ProtocolProxy proxy; public static final Log LOG = @@ -52,10 +52,12 @@ public interface TestProtocol0 extends VersionedProtocol { void ping() throws IOException; } - public interface TestProtocol1 extends TestProtocol0 { + public interface TestProtocol1 extends VersionedProtocol, TestProtocol0 { String echo(String value) throws IOException; } + @ProtocolInfo(protocolName= + "org.apache.hadoop.ipc.TestRPCCompatibility$TestProtocol1") public interface TestProtocol2 extends TestProtocol1 { int echo(int value) throws IOException; } @@ -89,11 +91,23 @@ public ProtocolSignature getProtocolSignature(String protocol, public static class TestImpl1 extends TestImpl0 implements TestProtocol1 { @Override public String echo(String value) { return value; } + @Override + public long getProtocolVersion(String protocol, + long clientVersion) throws IOException { + return TestProtocol1.versionID; + } } public static class TestImpl2 extends TestImpl1 implements TestProtocol2 { @Override public int echo(int value) { return value; } + + @Override + public long getProtocolVersion(String protocol, + long clientVersion) throws IOException { + return TestProtocol2.versionID; + } + } @After @@ -109,8 +123,10 @@ public void tearDown() throws IOException { @Test // old client vs new server public void testVersion0ClientVersion1Server() throws Exception { // create a server with two handlers + TestImpl1 impl = new TestImpl1(); server = RPC.getServer(TestProtocol1.class, - new TestImpl1(), ADDRESS, 0, 2, false, conf, null); + impl, ADDRESS, 0, 2, false, conf, null); + server.addProtocol(TestProtocol0.class, impl); server.start(); addr = NetUtils.getConnectAddress(server); @@ -172,8 +188,10 @@ public void ping() throws IOException { @Test // Compatible new client & old server public void testVersion2ClientVersion1Server() throws Exception { // create a server with two handlers + TestImpl1 impl = new TestImpl1(); server = RPC.getServer(TestProtocol1.class, - new TestImpl1(), ADDRESS, 0, 2, false, conf, null); + impl, ADDRESS, 0, 2, false, conf, null); + server.addProtocol(TestProtocol0.class, impl); server.start(); addr = 
NetUtils.getConnectAddress(server); @@ -190,8 +208,10 @@ public void testVersion2ClientVersion1Server() throws Exception { @Test // equal version client and server public void testVersion2ClientVersion2Server() throws Exception { // create a server with two handlers + TestImpl2 impl = new TestImpl2(); server = RPC.getServer(TestProtocol2.class, - new TestImpl2(), ADDRESS, 0, 2, false, conf, null); + impl, ADDRESS, 0, 2, false, conf, null); + server.addProtocol(TestProtocol0.class, impl); server.start(); addr = NetUtils.getConnectAddress(server); @@ -250,14 +270,16 @@ public void testHashCode() throws Exception { assertEquals(hash1, hash2); } + @ProtocolInfo(protocolName= + "org.apache.hadoop.ipc.TestRPCCompatibility$TestProtocol1") public interface TestProtocol4 extends TestProtocol2 { - public static final long versionID = 1L; + public static final long versionID = 4L; int echo(int value) throws IOException; } @Test public void testVersionMismatch() throws IOException { - server = RPC.getServer(TestProtocol2.class, new TestImpl0(), ADDRESS, 0, 2, + server = RPC.getServer(TestProtocol2.class, new TestImpl2(), ADDRESS, 0, 2, false, conf, null); server.start(); addr = NetUtils.getConnectAddress(server); @@ -268,7 +290,8 @@ public void testVersionMismatch() throws IOException { proxy.echo(21); fail("The call must throw VersionMismatch exception"); } catch (IOException ex) { - Assert.assertTrue(ex.getMessage().contains("VersionMismatch")); + Assert.assertTrue("Expected version mismatch but got " + ex.getMessage(), + ex.getMessage().contains("VersionMismatch")); } } } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java index 9d78ba77bc..10012348b4 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java @@ -26,12 +26,17 @@ import java.net.DatagramSocket; import java.net.SocketException; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.metrics2.AbstractMetric; +import org.apache.hadoop.metrics2.MetricsRecord; +import org.apache.hadoop.metrics2.MetricsTag; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.MetricsRegistry; @@ -54,6 +59,44 @@ public class TestGangliaMetrics { "test.s1rec.S1NumOps", "test.s1rec.S1AvgTime" }; + @Test + public void testTagsForPrefix() throws Exception { + ConfigBuilder cb = new ConfigBuilder() + .add("test.sink.ganglia.tagsForPrefix.all", "*") + .add("test.sink.ganglia.tagsForPrefix.some", "NumActiveSinks, NumActiveSources") + .add("test.sink.ganglia.tagsForPrefix.none", ""); + GangliaSink30 sink = new GangliaSink30(); + sink.init(cb.subset("test.sink.ganglia")); + + List tags = new ArrayList(); + tags.add(new MetricsTag(MsInfo.Context, "all")); + tags.add(new MetricsTag(MsInfo.NumActiveSources, "foo")); + tags.add(new MetricsTag(MsInfo.NumActiveSinks, "bar")); + tags.add(new MetricsTag(MsInfo.NumAllSinks, "haa")); + tags.add(new MetricsTag(MsInfo.Hostname, 
"host")); + Set metrics = new HashSet(); + MetricsRecord record = new MetricsRecordImpl(MsInfo.Context, (long) 1, tags, metrics); + + StringBuilder sb = new StringBuilder(); + sink.appendPrefix(record, sb); + assertEquals(".NumActiveSources=foo.NumActiveSinks=bar.NumAllSinks=haa", sb.toString()); + + tags.set(0, new MetricsTag(MsInfo.Context, "some")); + sb = new StringBuilder(); + sink.appendPrefix(record, sb); + assertEquals(".NumActiveSources=foo.NumActiveSinks=bar", sb.toString()); + + tags.set(0, new MetricsTag(MsInfo.Context, "none")); + sb = new StringBuilder(); + sink.appendPrefix(record, sb); + assertEquals("", sb.toString()); + + tags.set(0, new MetricsTag(MsInfo.Context, "nada")); + sb = new StringBuilder(); + sink.appendPrefix(record, sb); + assertEquals("", sb.toString()); + } + @Test public void testGangliaMetrics2() throws Exception { ConfigBuilder cb = new ConfigBuilder().add("default.period", 10) .add("test.sink.gsink30.context", "test") // filter out only "test" diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java index a820cd49b3..7a21e4c6b8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java @@ -18,7 +18,7 @@ import junit.framework.TestCase; -import org.apache.hadoop.alfredo.server.AuthenticationFilter; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.FilterContainer; import org.mockito.Mockito; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java index 3506b5de71..0cec473c52 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java @@ -49,8 +49,7 @@ protected void setUp() } @After - protected void tearDown() - throws Exception { + protected void tearDown() { FileUtil.fullyDelete(TEST_ROOT_DIR); } diff --git a/hadoop-common-project/pom.xml b/hadoop-common-project/pom.xml index 552b3c76c6..ac196188a7 100644 --- a/hadoop-common-project/pom.xml +++ b/hadoop-common-project/pom.xml @@ -29,6 +29,7 @@ hadoop-auth + hadoop-auth-examples hadoop-common hadoop-annotations diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index a33030c5e5..ea81b8034e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -5,9 +5,31 @@ Trunk (unreleased changes) HDFS-395. DFS Scalability: Incremental block reports. (Tomasz Nykiel via hairong) + IMPROVEMENTS + + HADOOP-7524 Change RPC to allow multiple protocols including multuple versions of the same protocol (sanjay Radia) + + HDFS-1620. Rename HdfsConstants -> HdfsServerConstants, FSConstants -> + HdfsConstants. (Harsh J Chouraria via atm) + HDFS-2197. Refactor RPC call implementations out of NameNode class (todd) + + HDFS-2018. Move all journal stream management code into one place. + (Ivan Kelly via jitendra) + + HDFS-2223. Untangle depencencies between NN components (todd) + BUG FIXES HDFS-2287. 
TestParallelRead has a small off-by-one bug. (todd) + HDFS-2299. TestOfflineEditsViewer is failing on trunk. (Uma Maheswara Rao G + via atm) + HDFS-2310. TestBackupNode fails since HADOOP-7524 went in. + (Ivan Kelly via todd) + + HDFS-2313. Rat excludes has a typo for excluding editsStored files. (atm) + + HDFS-2314. MRV1 test compilation broken after HDFS-2197 (todd) + Release 0.23.0 - Unreleased INCOMPATIBLE CHANGES @@ -687,6 +709,9 @@ Release 0.23.0 - Unreleased HDFS-2266. Add Namesystem and SafeMode interfaces to avoid directly referring to FSNamesystem in BlockManager. (szetszwo) + HDFS-1217. Change some NameNode methods from public to package private. + (Laxman via szetszwo) + OPTIMIZATIONS HDFS-1458. Improve checkpoint performance by avoiding unnecessary image @@ -1003,6 +1028,9 @@ Release 0.23.0 - Unreleased HDFS-2286. DataXceiverServer logs AsynchronousCloseException at shutdown (todd) + HDFS-2289. Ensure jsvc is bundled with the HDFS distribution artifact. + (Alejandro Abdelnur via acmurthy) + BREAKDOWN OF HDFS-1073 SUBTASKS HDFS-1521. Persist transaction ID on disk between NN restarts. @@ -1086,6 +1114,7 @@ Release 0.22.0 - Unreleased (jghoman) HDFS-1330. Make RPCs to DataNodes timeout. (hairong) + Added additional unit tests per HADOOP-6889. (John George via mattf) HDFS-202. HDFS support of listLocatedStatus introduced in HADOOP-6870. HDFS piggyback block locations to each file status when listing a @@ -1541,6 +1570,11 @@ Release 0.22.0 - Unreleased HDFS-1981. NameNode does not saveNamespace() when editsNew is empty. (Uma Maheswara Rao G via shv) + HDFS-2258. Reset lease limits to default values in TestLeaseRecovery2. (shv) + + HDFS-2232. Generalize regular expressions in TestHDFSCLI. + (Plamen Jeliazkov via shv) + Release 0.21.1 - Unreleased HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. 
(eli) diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index c49c5151ad..9cdab097f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -296,7 +296,7 @@ src/test/all-tests src/test/resources/*.tgz src/test/resources/data* - src/test/resources/editStored* + src/test/resources/editsStored* src/test/resources/empty-file src/main/webapps/datanode/robots.txt src/main/docs/releasenotes.html @@ -304,6 +304,56 @@ + + org.apache.maven.plugins + maven-antrun-plugin + + + xprepare-package-hadoop-daemon + prepare-package + + run + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs index 8e8b1cfafc..ece940bdd9 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs @@ -118,7 +118,7 @@ if [ "$starting_secure_dn" = "true" ]; then HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid" fi - exec "$HADOOP_HDFS_HOME/bin/jsvc" \ + exec "$HADOOP_HDFS_HOME/libexec/jsvc" \ -Dproc_$COMMAND -outfile "$HADOOP_LOG_DIR/jsvc.out" \ -errfile "$HADOOP_LOG_DIR/jsvc.err" \ -pidfile "$HADOOP_SECURE_DN_PID" \ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml index 2bfa2e0bbd..0d3ed89c7f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml @@ -505,7 +505,7 @@ using 'bin/hadoop dfsadmin -safemode' command. NameNode front page shows whether Safemode is on or off. A more detailed description and configuration is maintained as JavaDoc for - setSafeMode(). + setSafeMode().
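An illustrative aside, not part of the patch: the programmatic counterpart of the dfsadmin command described above, using the setSafeMode() API that this patch re-homes onto HdfsConstants.SafeModeAction. It is a sketch under assumptions: the Configuration is assumed to point at a running HDFS cluster whose default file system is a DistributedFileSystem.

    // Query, enter and leave safe mode from Java instead of 'bin/hadoop dfsadmin -safemode'.
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    boolean inSafeMode = dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET);  // just ask
    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);                     // force it on
    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);                     // and back off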

fsck diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java index 642f60be8b..7772ad9792 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.DirectoryListing; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; @@ -70,9 +70,9 @@ public class Hdfs extends AbstractFileSystem { * @throws IOException */ Hdfs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException { - super(theUri, FSConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT); + super(theUri, HdfsConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT); - if (!theUri.getScheme().equalsIgnoreCase(FSConstants.HDFS_URI_SCHEME)) { + if (!theUri.getScheme().equalsIgnoreCase(HdfsConstants.HDFS_URI_SCHEME)) { throw new IllegalArgumentException("Passed URI's scheme is not for Hdfs"); } String host = theUri.getHost(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 237372377e..85639afc1b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -60,10 +60,10 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; -import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction; -import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil; import org.apache.hadoop.hdfs.protocol.LocatedBlock; @@ -77,7 +77,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.namenode.NameNode; @@ -156,14 +156,14 @@ static class Conf { DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT); confTime = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, - HdfsConstants.WRITE_TIMEOUT); + HdfsServerConstants.WRITE_TIMEOUT); 
ioBufferSize = conf.getInt( CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT); bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT); socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, - HdfsConstants.READ_TIMEOUT); + HdfsServerConstants.READ_TIMEOUT); /** dfs.write.packet.size is an internal config variable */ writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT); @@ -279,12 +279,12 @@ int getMaxBlockAcquireFailures() { */ int getDatanodeWriteTimeout(int numNodes) { return (dfsClientConf.confTime > 0) ? - (dfsClientConf.confTime + HdfsConstants.WRITE_TIMEOUT_EXTENSION * numNodes) : 0; + (dfsClientConf.confTime + HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * numNodes) : 0; } int getDatanodeReadTimeout(int numNodes) { return dfsClientConf.socketTimeout > 0 ? - (HdfsConstants.READ_TIMEOUT_EXTENSION * numNodes + + (HdfsServerConstants.READ_TIMEOUT_EXTENSION * numNodes + dfsClientConf.socketTimeout) : 0; } @@ -1046,7 +1046,7 @@ public static MD5MD5CRC32FileChecksum getFileChecksum(String src, out = new DataOutputStream( new BufferedOutputStream(NetUtils.getOutputStream(sock), - FSConstants.SMALL_BUFFER_SIZE)); + HdfsConstants.SMALL_BUFFER_SIZE)); in = new DataInputStream(NetUtils.getInputStream(sock)); if (LOG.isDebugEnabled()) { @@ -1225,7 +1225,7 @@ public DatanodeInfo[] datanodeReport(DatanodeReportType type) /** * Enter, leave or get safe mode. * - * @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction) + * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction) */ public boolean setSafeMode(SafeModeAction action) throws IOException { return namenode.setSafeMode(action); @@ -1293,7 +1293,7 @@ public void finalizeUpgrade() throws IOException { } /** - * @see ClientProtocol#distributedUpgradeProgress(FSConstants.UpgradeAction) + * @see ClientProtocol#distributedUpgradeProgress(HdfsConstants.UpgradeAction) */ public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action) throws IOException { @@ -1392,10 +1392,10 @@ ContentSummary getContentSummary(String src) throws IOException { void setQuota(String src, long namespaceQuota, long diskspaceQuota) throws IOException { // sanity check - if ((namespaceQuota <= 0 && namespaceQuota != FSConstants.QUOTA_DONT_SET && - namespaceQuota != FSConstants.QUOTA_RESET) || - (diskspaceQuota <= 0 && diskspaceQuota != FSConstants.QUOTA_DONT_SET && - diskspaceQuota != FSConstants.QUOTA_RESET)) { + if ((namespaceQuota <= 0 && namespaceQuota != HdfsConstants.QUOTA_DONT_SET && + namespaceQuota != HdfsConstants.QUOTA_RESET) || + (diskspaceQuota <= 0 && diskspaceQuota != HdfsConstants.QUOTA_DONT_SET && + diskspaceQuota != HdfsConstants.QUOTA_RESET)) { throw new IllegalArgumentException("Invalid values for quota : " + namespaceQuota + " and " + diskspaceQuota); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index 03879338dd..c330297cd3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -47,7 +47,7 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import 
org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil; import org.apache.hadoop.hdfs.protocol.LocatedBlock; @@ -166,7 +166,7 @@ private class Packet { this.seqno = HEART_BEAT_SEQNO; buffer = null; - int packetSize = PacketHeader.PKT_HEADER_LEN + FSConstants.BYTES_IN_INTEGER; + int packetSize = PacketHeader.PKT_HEADER_LEN + HdfsConstants.BYTES_IN_INTEGER; buf = new byte[packetSize]; checksumStart = dataStart = packetSize; @@ -234,12 +234,12 @@ ByteBuffer getBuffer() { dataStart - checksumLen , checksumLen); } - int pktLen = FSConstants.BYTES_IN_INTEGER + dataLen + checksumLen; + int pktLen = HdfsConstants.BYTES_IN_INTEGER + dataLen + checksumLen; //normally dataStart == checksumPos, i.e., offset is zero. buffer = ByteBuffer.wrap( buf, dataStart - checksumPos, - PacketHeader.PKT_HEADER_LEN + pktLen - FSConstants.BYTES_IN_INTEGER); + PacketHeader.PKT_HEADER_LEN + pktLen - HdfsConstants.BYTES_IN_INTEGER); buf = null; buffer.mark(); @@ -849,7 +849,7 @@ private void transfer(final DatanodeInfo src, final DatanodeInfo[] targets, final long writeTimeout = dfsClient.getDatanodeWriteTimeout(2); out = new DataOutputStream(new BufferedOutputStream( NetUtils.getOutputStream(sock, writeTimeout), - FSConstants.SMALL_BUFFER_SIZE)); + HdfsConstants.SMALL_BUFFER_SIZE)); //send the TRANSFER_BLOCK request new Sender(out).transferBlock(block, blockToken, dfsClient.clientName, @@ -1023,7 +1023,7 @@ private boolean createBlockOutputStream(DatanodeInfo[] nodes, long newGS, // out = new DataOutputStream(new BufferedOutputStream( NetUtils.getOutputStream(s, writeTimeout), - FSConstants.SMALL_BUFFER_SIZE)); + HdfsConstants.SMALL_BUFFER_SIZE)); assert null == blockReplyStream : "Previous blockReplyStream unclosed"; blockReplyStream = new DataInputStream(NetUtils.getInputStream(s)); @@ -1173,7 +1173,7 @@ static Socket createSocketForPipeline(final DatanodeInfo first, final int timeout = client.getDatanodeReadTimeout(length); NetUtils.connect(sock, isa, timeout); sock.setSoTimeout(timeout); - sock.setSendBufferSize(FSConstants.DEFAULT_DATA_SOCKET_SIZE); + sock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE); if(DFSClient.LOG.isDebugEnabled()) { DFSClient.LOG.debug("Send buf size " + sock.getSendBufferSize()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index 5d32c7a05e..e884f1c63f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -48,7 +48,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.namenode.NameNode; @@ -646,7 +646,7 @@ static ClientProtocol createRPCNamenode(InetSocketAddress nameNodeAddr, static ClientProtocol createNamenode(ClientProtocol rpcNamenode) throws IOException { RetryPolicy createPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep( - 5, FSConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS); + 5, 
HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS); Map,RetryPolicy> remoteExceptionToPolicyMap = new HashMap, RetryPolicy>(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index ef5ad425c9..68f8616941 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -49,9 +49,9 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; -import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; @@ -108,7 +108,7 @@ public void initialize(URI uri, Configuration conf) throws IOException { InetSocketAddress namenode = NameNode.getAddress(uri.getAuthority()); this.dfs = new DFSClient(namenode, conf, statistics); - this.uri = URI.create(FSConstants.HDFS_URI_SCHEME + "://" + uri.getAuthority()); + this.uri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + uri.getAuthority()); this.workingDir = getHomeDirectory(); } @@ -642,9 +642,9 @@ public DatanodeInfo[] getDataNodeStats(final DatanodeReportType type * Enter, leave or get safe mode. * * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode( - * FSConstants.SafeModeAction) + * HdfsConstants.SafeModeAction) */ - public boolean setSafeMode(FSConstants.SafeModeAction action) + public boolean setSafeMode(HdfsConstants.SafeModeAction action) throws IOException { return dfs.setSafeMode(action); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java index ba26ad2c24..35d45bac32 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java @@ -30,7 +30,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.HadoopIllegalArgumentException; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.StringUtils; @@ -162,7 +162,7 @@ private synchronized void remove(final LeaseRenewer r) { /** The time in milliseconds that the map became empty. 
*/ private long emptyTime = Long.MAX_VALUE; /** A fixed lease renewal time period in milliseconds */ - private long renewal = FSConstants.LEASE_SOFTLIMIT_PERIOD/2; + private long renewal = HdfsConstants.LEASE_SOFTLIMIT_PERIOD/2; /** A daemon for renewing lease */ private Daemon daemon = null; @@ -352,7 +352,7 @@ synchronized void closeClient(final DFSClient dfsc) { //update renewal time if (renewal == dfsc.getHdfsTimeout()/2) { - long min = FSConstants.LEASE_SOFTLIMIT_PERIOD; + long min = HdfsConstants.LEASE_SOFTLIMIT_PERIOD; for(DFSClient c : dfsclients) { if (c.getHdfsTimeout() > 0) { final long timeout = c.getHdfsTimeout(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java index 51311f5216..0be0bb9fb9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.token.Token; @@ -394,7 +394,7 @@ public static RemoteBlockReader newBlockReader( Socket sock, String file, throws IOException { // in and out will be closed when sock is closed (by the caller) final DataOutputStream out = new DataOutputStream(new BufferedOutputStream( - NetUtils.getOutputStream(sock, HdfsConstants.WRITE_TIMEOUT))); + NetUtils.getOutputStream(sock, HdfsServerConstants.WRITE_TIMEOUT))); new Sender(out).readBlock(block, blockToken, clientName, startOffset, len); // @@ -486,7 +486,7 @@ public boolean hasSentStatusCode() { void sendReadResult(Socket sock, Status statusCode) { assert !sentStatusCode : "already sent status code to " + sock; try { - OutputStream out = NetUtils.getOutputStream(sock, HdfsConstants.WRITE_TIMEOUT); + OutputStream out = NetUtils.getOutputStream(sock, HdfsServerConstants.WRITE_TIMEOUT); ClientReadStatusProto.newBuilder() .setStatus(statusCode) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java index 165096f24b..e1006a65d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java @@ -22,7 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 4153110ca9..e2ecbaa46d 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -35,7 +35,7 @@ import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction; import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; import org.apache.hadoop.hdfs.server.namenode.SafeModeException; @@ -578,7 +578,7 @@ public void renewLease(String clientName) throws AccessControlException, * Return live datanodes if type is LIVE; dead datanodes if type is DEAD; * otherwise all datanodes if type is ALL. */ - public DatanodeInfo[] getDatanodeReport(FSConstants.DatanodeReportType type) + public DatanodeInfo[] getDatanodeReport(HdfsConstants.DatanodeReportType type) throws IOException; /** @@ -601,7 +601,7 @@ public long getPreferredBlockSize(String filename) *

* Safe mode is entered automatically at name node startup. * Safe mode can also be entered manually using - * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}. + * {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}. *

* At startup the name node accepts data node reports collecting * information about block locations. @@ -617,11 +617,11 @@ public long getPreferredBlockSize(String filename) * Then the name node leaves safe mode. *

* If safe mode is turned on manually using - * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)} + * {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)} * then the name node stays in safe mode until it is manually turned off - * using {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}. + * using {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}. * Current state of the name node can be verified using - * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)} + * {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)} *

* Configuration parameters:

* dfs.safemode.threshold.pct is the threshold parameter.
* dfs.safemode.extension is the safe mode extension parameter.
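For illustration only (not part of this patch): a minimal sketch of how a caller could drive safe mode through the renamed constants class, assuming an HDFS filesystem reachable via fs.defaultFS. The SAFEMODE_GET/ENTER/LEAVE actions are the ones named in the javadoc above; the setSafeMode signature matches the DistributedFileSystem change earlier in this patch.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class SafeModeSketch {
  public static void main(String[] args) throws IOException {
    // Assumes fs.defaultFS points at an HDFS namenode; illustrative only.
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());

    boolean on = dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET);  // query current state
    if (!on) {
      dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);           // enter manually
    }
    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);             // leave manually
  }
}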
@@ -644,7 +644,7 @@ public long getPreferredBlockSize(String filename) * * @throws IOException */ - public boolean setSafeMode(FSConstants.SafeModeAction action) + public boolean setSafeMode(HdfsConstants.SafeModeAction action) throws IOException; /** @@ -685,7 +685,7 @@ public boolean setSafeMode(FSConstants.SafeModeAction action) /** * Report distributed upgrade progress or force current upgrade to proceed. * - * @param action {@link FSConstants.UpgradeAction} to perform + * @param action {@link HdfsConstants.UpgradeAction} to perform * @return upgrade status information or null if no upgrades are in progress * @throws IOException */ @@ -777,8 +777,8 @@ public ContentSummary getContentSummary(String path) *

* * The quota can have three types of values : (1) 0 or more will set - * the quota to that value, (2) {@link FSConstants#QUOTA_DONT_SET} implies - * the quota will not be changed, and (3) {@link FSConstants#QUOTA_RESET} + * the quota to that value, (2) {@link HdfsConstants#QUOTA_DONT_SET} implies + * the quota will not be changed, and (3) {@link HdfsConstants#QUOTA_RESET} * implies the quota will be reset. Any other value is a runtime error. * * @throws AccessControlException permission denied diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FSConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java similarity index 98% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FSConstants.java rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java index f023d73c93..4a456c94f7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FSConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java @@ -26,9 +26,9 @@ * ************************************/ @InterfaceAudience.Private -public final class FSConstants { +public final class HdfsConstants { /* Hidden constructor */ - private FSConstants() { + private HdfsConstants() { } public static int MIN_BLOCKS_FOR_WRITE = 5; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index 260cd7600b..2f224409f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -55,15 +55,15 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; @@ -306,13 +306,13 @@ private void dispatch() { DataInputStream in = null; try { sock.connect(NetUtils.createSocketAddr( - target.datanode.getName()), HdfsConstants.READ_TIMEOUT); + target.datanode.getName()), HdfsServerConstants.READ_TIMEOUT); sock.setKeepAlive(true); out = new DataOutputStream( new BufferedOutputStream( - sock.getOutputStream(), FSConstants.IO_FILE_BUFFER_SIZE)); + sock.getOutputStream(), 
HdfsConstants.IO_FILE_BUFFER_SIZE)); sendRequest(out); in = new DataInputStream( new BufferedInputStream( - sock.getInputStream(), FSConstants.IO_FILE_BUFFER_SIZE)); + sock.getInputStream(), HdfsConstants.IO_FILE_BUFFER_SIZE)); receiveResponse(in); bytesMoved.inc(block.getNumBytes()); LOG.info( "Moving block " + block.getBlock().getBlockId() + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java index e7c160af25..293d5c5969 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.util.LightWeightGSet; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java index c3f676e3d2..29565ace47 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java @@ -22,8 +22,8 @@ import java.util.List; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.namenode.NameNode; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 4e45449b1f..682d272922 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -50,8 +50,8 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.INode; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java index 1b483a7537..6455b579a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.namenode.FSClusterStats; import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo; @@ -439,7 +439,7 @@ private boolean isGoodTarget(DatanodeDescriptor node, long remaining = node.getRemaining() - (node.getBlocksScheduled() * blockSize); // check the remaining capacity of the target machine - if (blockSize* FSConstants.MIN_BLOCKS_FOR_WRITE>remaining) { + if (blockSize* HdfsConstants.MIN_BLOCKS_FOR_WRITE>remaining) { if(LOG.isDebugEnabled()) { threadLocalBuilder.get().append(node.toString()).append(": ") .append("Node ").append(NodeBase.getPath(node)) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index a069d761f3..e0c2de955a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -45,7 +45,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java similarity index 98% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java index ef83ad2db8..256e5d663e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java @@ -29,9 +29,9 @@ ************************************/ @InterfaceAudience.Private -public final class HdfsConstants { +public final class HdfsServerConstants { /* Hidden constructor */ - private HdfsConstants() { } + private HdfsServerConstants() { } /** * Type of the node diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java index 990b089fd7..5f0b2604b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java @@ -21,7 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; /** * The exception is thrown when external version does not match @@ -34,7 +34,7 @@ public class IncorrectVersionException extends IOException { private static final long serialVersionUID = 1L; public IncorrectVersionException(int versionReported, String ofWhat) { - this(versionReported, ofWhat, FSConstants.LAYOUT_VERSION); + this(versionReported, ofWhat, HdfsConstants.LAYOUT_VERSION); } public IncorrectVersionException(int versionReported, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java index 81f182d6ca..6e220d6bd2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java @@ -166,8 +166,8 @@ public static DatanodeInfo bestNode(DatanodeInfo[] nodes, boolean doRandom) try { s = new Socket(); - s.connect(targetAddr, HdfsConstants.READ_TIMEOUT); - s.setSoTimeout(HdfsConstants.READ_TIMEOUT); + s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT); + s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT); } catch (IOException e) { deadNodes.add(chosenNode); s.close(); @@ -188,8 +188,8 @@ public static void streamBlockInAscii(InetSocketAddress addr, String poolId, JspWriter out, Configuration conf) throws IOException { if (chunkSizeToView == 0) return; Socket s = new Socket(); - s.connect(addr, HdfsConstants.READ_TIMEOUT); - s.setSoTimeout(HdfsConstants.READ_TIMEOUT); + s.connect(addr, HdfsServerConstants.READ_TIMEOUT); + s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT); long amtToRead = Math.min(chunkSizeToView, blockSize - offsetIntoBlock); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java index 19ad35bb9a..4c11973d4a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java @@ -32,11 +32,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.util.VersionInfo; @@ -434,10 +434,10 @@ public StorageState analyzeStorage(StartupOption startOpt, Storage storage) this.lock(); // lock storage if it exists - if (startOpt == HdfsConstants.StartupOption.FORMAT) + if (startOpt == HdfsServerConstants.StartupOption.FORMAT) return StorageState.NOT_FORMATTED; - if (startOpt != HdfsConstants.StartupOption.IMPORT) { + if (startOpt != HdfsServerConstants.StartupOption.IMPORT) { storage.checkOldLayoutStorage(this); } @@ -866,7 +866,7 @@ public static void deleteDir(File dir) throws IOException { * @throws IOException */ public void writeAll() throws IOException { - this.layoutVersion = FSConstants.LAYOUT_VERSION; + this.layoutVersion = HdfsConstants.LAYOUT_VERSION; for (Iterator it = storageDirs.iterator(); it.hasNext();) { writeProperties(it.next()); } @@ -938,7 +938,7 @@ protected void setClusterId(Properties props, int layoutVersion, protected void setLayoutVersion(Properties props, StorageDirectory sd) throws IncorrectVersionException, InconsistentFSStateException { int lv = Integer.parseInt(getProperty(props, sd, "layoutVersion")); - if (lv < FSConstants.LAYOUT_VERSION) { // future version + if (lv < HdfsConstants.LAYOUT_VERSION) { // future version throw new IncorrectVersionException(lv, "storage directory " + sd.root.getAbsolutePath()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeManager.java index 911dd407d4..405006bfb1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeManager.java @@ -21,7 +21,7 @@ import java.util.SortedSet; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; /** @@ -69,7 +69,7 @@ public synchronized boolean initializeUpgrade() throws IOException { currentUpgrades = getDistributedUpgrades(); if(currentUpgrades == null) { // set new upgrade state - setUpgradeState(false, FSConstants.LAYOUT_VERSION); + setUpgradeState(false, HdfsConstants.LAYOUT_VERSION); return false; } Upgradeable curUO = currentUpgrades.first(); @@ -85,7 +85,7 @@ public synchronized boolean isUpgradeCompleted() { return false; } - public abstract HdfsConstants.NodeType getType(); + public abstract HdfsServerConstants.NodeType getType(); public abstract boolean startUpgrade() throws IOException; public abstract void completeUpgrade() throws IOException; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java index c2558bed32..b59ef965d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java @@ -22,7 +22,7 @@ import java.util.TreeSet; import 
org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.util.StringUtils; /** @@ -40,7 +40,7 @@ public class UpgradeObjectCollection { static class UOSignature implements Comparable { int version; - HdfsConstants.NodeType type; + HdfsServerConstants.NodeType type; String className; UOSignature(Upgradeable uo) { @@ -53,7 +53,7 @@ int getVersion() { return version; } - HdfsConstants.NodeType getType() { + HdfsServerConstants.NodeType getType() { return type; } @@ -111,13 +111,13 @@ static void registerUpgrade(Upgradeable uo) { } public static SortedSet getDistributedUpgrades(int versionFrom, - HdfsConstants.NodeType type + HdfsServerConstants.NodeType type ) throws IOException { - assert FSConstants.LAYOUT_VERSION <= versionFrom : "Incorrect version " - + versionFrom + ". Expected to be <= " + FSConstants.LAYOUT_VERSION; + assert HdfsConstants.LAYOUT_VERSION <= versionFrom : "Incorrect version " + + versionFrom + ". Expected to be <= " + HdfsConstants.LAYOUT_VERSION; SortedSet upgradeObjects = new TreeSet(); for(UOSignature sig : upgradeTable) { - if(sig.getVersion() < FSConstants.LAYOUT_VERSION) + if(sig.getVersion() < HdfsConstants.LAYOUT_VERSION) continue; if(sig.getVersion() > versionFrom) break; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Upgradeable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Upgradeable.java index 6081c4cfc6..016fd948e8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Upgradeable.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Upgradeable.java @@ -42,7 +42,7 @@ public interface Upgradeable extends Comparable { * Get the type of the software component, which this object is upgrading. * @return type */ - HdfsConstants.NodeType getType(); + HdfsServerConstants.NodeType getType(); /** * Description of the upgrade object for displaying. 
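To summarize the two renames this patch performs, downstream code would migrate its imports roughly as sketched below: the client-facing org.apache.hadoop.hdfs.protocol.FSConstants takes the name HdfsConstants, while the server-side org.apache.hadoop.hdfs.server.common.HdfsConstants becomes HdfsServerConstants. The constants referenced here appear in the hunks above; the snippet itself is illustrative, not part of the patch.

import org.apache.hadoop.hdfs.protocol.HdfsConstants;            // was org.apache.hadoop.hdfs.protocol.FSConstants
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; // was org.apache.hadoop.hdfs.server.common.HdfsConstants

public class RenameSketch {
  public static void main(String[] args) {
    long leaseSoftLimitMs = HdfsConstants.LEASE_SOFTLIMIT_PERIOD;  // protocol-side constant
    int readTimeoutMs = HdfsServerConstants.READ_TIMEOUT;          // server-side constant
    System.out.println(leaseSoftLimitMs + " " + readTimeoutMs);
  }
}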
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java index b547701b85..668b45bff8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java @@ -30,14 +30,14 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.HardLink; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.util.Daemon; @@ -89,7 +89,7 @@ public BlockPoolSliceStorage(StorageInfo storageInfo, String bpid) { */ void recoverTransitionRead(DataNode datanode, NamespaceInfo nsInfo, Collection dataDirs, StartupOption startOpt) throws IOException { - assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() + assert HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() : "Block-pool and name-node layout versions must be the same."; // 1. 
For each BP data directory analyze the state and @@ -171,7 +171,7 @@ private void format(StorageDirectory bpSdir, NamespaceInfo nsInfo) throws IOExce LOG.info("Formatting block pool " + blockpoolID + " directory " + bpSdir.getCurrentDir()); bpSdir.clearDirectory(); // create directory - this.layoutVersion = FSConstants.LAYOUT_VERSION; + this.layoutVersion = HdfsConstants.LAYOUT_VERSION; this.cTime = nsInfo.getCTime(); this.namespaceID = nsInfo.getNamespaceID(); this.blockpoolID = nsInfo.getBlockPoolID(); @@ -239,7 +239,7 @@ private void doTransition(DataNode datanode, StorageDirectory sd, readProperties(sd); checkVersionUpgradable(this.layoutVersion); - assert this.layoutVersion >= FSConstants.LAYOUT_VERSION + assert this.layoutVersion >= HdfsConstants.LAYOUT_VERSION : "Future version is not allowed"; if (getNamespaceID() != nsInfo.getNamespaceID()) { throw new IOException("Incompatible namespaceIDs in " @@ -253,7 +253,7 @@ private void doTransition(DataNode datanode, StorageDirectory sd, + nsInfo.getBlockPoolID() + "; datanode blockpoolID = " + blockpoolID); } - if (this.layoutVersion == FSConstants.LAYOUT_VERSION + if (this.layoutVersion == HdfsConstants.LAYOUT_VERSION && this.cTime == nsInfo.getCTime()) return; // regular startup @@ -261,7 +261,7 @@ private void doTransition(DataNode datanode, StorageDirectory sd, UpgradeManagerDatanode um = datanode.getUpgradeManagerDatanode(nsInfo.getBlockPoolID()); verifyDistributedUpgradeProgress(um, nsInfo); - if (this.layoutVersion > FSConstants.LAYOUT_VERSION + if (this.layoutVersion > HdfsConstants.LAYOUT_VERSION || this.cTime < nsInfo.getCTime()) { doUpgrade(sd, nsInfo); // upgrade return; @@ -327,7 +327,7 @@ void doUpgrade(StorageDirectory bpSd, NamespaceInfo nsInfo) throws IOException { // 3. Create new /current with block files hardlinks and VERSION linkAllBlocks(bpTmpDir, bpCurDir); - this.layoutVersion = FSConstants.LAYOUT_VERSION; + this.layoutVersion = HdfsConstants.LAYOUT_VERSION; assert this.namespaceID == nsInfo.getNamespaceID() : "Data-node and name-node layout versions must be the same."; this.cTime = nsInfo.getCTime(); @@ -389,7 +389,7 @@ void doRollback(StorageDirectory bpSd, NamespaceInfo nsInfo) // the namespace state or can be further upgraded to it. 
// In another word, we can only roll back when ( storedLV >= software LV) // && ( DN.previousCTime <= NN.ctime) - if (!(prevInfo.getLayoutVersion() >= FSConstants.LAYOUT_VERSION && + if (!(prevInfo.getLayoutVersion() >= HdfsConstants.LAYOUT_VERSION && prevInfo.getCTime() <= nsInfo.getCTime())) { // cannot rollback throw new InconsistentFSStateException(bpSd.getRoot(), "Cannot rollback to a newer state.\nDatanode previous state: LV = " diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java index b51241ed3f..50e118aaa0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java @@ -36,7 +36,7 @@ import org.apache.hadoop.fs.FSOutputSummer; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; @@ -179,7 +179,7 @@ class BlockReceiver implements Closeable { this.out = streams.dataOut; this.cout = streams.checksumOut; this.checksumOut = new DataOutputStream(new BufferedOutputStream( - streams.checksumOut, FSConstants.SMALL_BUFFER_SIZE)); + streams.checksumOut, HdfsConstants.SMALL_BUFFER_SIZE)); // write data chunk header if creating a new replica if (isCreate) { BlockMetadataHeader.writeHeader(checksumOut, checksum); @@ -398,7 +398,7 @@ private void readNextPacket() throws IOException { buf.limit(bufRead); } - while (buf.remaining() < FSConstants.BYTES_IN_INTEGER) { + while (buf.remaining() < HdfsConstants.BYTES_IN_INTEGER) { if (buf.position() > 0) { shiftBufData(); } @@ -420,7 +420,7 @@ private void readNextPacket() throws IOException { // Subtract BYTES_IN_INTEGER since that accounts for the payloadLen that // we read above. 
int pktSize = payloadLen + PacketHeader.PKT_HEADER_LEN - - FSConstants.BYTES_IN_INTEGER; + - HdfsConstants.BYTES_IN_INTEGER; if (buf.remaining() < pktSize) { //we need to read more data diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java index ac194622a3..b9e3858f3e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java @@ -32,7 +32,7 @@ import org.apache.commons.logging.Log; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.io.IOUtils; @@ -155,7 +155,7 @@ class BlockSender implements java.io.Closeable { if ( !corruptChecksumOk || datanode.data.metaFileExists(block) ) { checksumIn = new DataInputStream(new BufferedInputStream(datanode.data - .getMetaDataInputStream(block), FSConstants.IO_FILE_BUFFER_SIZE)); + .getMetaDataInputStream(block), HdfsConstants.IO_FILE_BUFFER_SIZE)); // read and handle the common header here. For now just a version BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn); @@ -472,14 +472,14 @@ long sendBlock(DataOutputStream out, OutputStream baseStream, streamForSendChunks = baseStream; // assure a mininum buffer size. - maxChunksPerPacket = (Math.max(FSConstants.IO_FILE_BUFFER_SIZE, + maxChunksPerPacket = (Math.max(HdfsConstants.IO_FILE_BUFFER_SIZE, MIN_BUFFER_WITH_TRANSFERTO) + bytesPerChecksum - 1)/bytesPerChecksum; // allocate smaller buffer while using transferTo(). 
pktSize += checksumSize * maxChunksPerPacket; } else { - maxChunksPerPacket = Math.max(1, (FSConstants.IO_FILE_BUFFER_SIZE + maxChunksPerPacket = Math.max(1, (HdfsConstants.IO_FILE_BUFFER_SIZE + bytesPerChecksum - 1) / bytesPerChecksum); pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index a9c29cc821..edc57fd797 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -68,7 +68,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; @@ -83,9 +83,9 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.hdfs.server.common.Storage; @@ -438,9 +438,9 @@ private static String getHostName(Configuration config) private void initConfig(Configuration conf) { this.socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, - HdfsConstants.READ_TIMEOUT); + HdfsServerConstants.READ_TIMEOUT); this.socketWriteTimeout = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, - HdfsConstants.WRITE_TIMEOUT); + HdfsServerConstants.WRITE_TIMEOUT); /* Based on results on different platforms, we might need set the default * to false on some of them. */ this.transferToAllowed = conf.getBoolean( @@ -623,7 +623,7 @@ private void initDataXceiver(Configuration conf) throws IOException { } else { ss = secureResources.getStreamingSocket(); } - ss.setReceiveBufferSize(FSConstants.DEFAULT_DATA_SOCKET_SIZE); + ss.setReceiveBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE); // adjust machine name with the actual port int tmpPort = ss.getLocalPort(); selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(), @@ -768,9 +768,9 @@ private NamespaceInfo handshake() throws IOException { } catch (InterruptedException ie) {} } - assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() : + assert HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() : "Data-node and name-node layout versions must be the same." 
- + "Expected: "+ FSConstants.LAYOUT_VERSION + + "Expected: "+ HdfsConstants.LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion(); return nsInfo; } @@ -814,7 +814,7 @@ void setupBPStorage() throws IOException { if (simulatedFSDataset) { initFsDataSet(conf, dataDirs); bpRegistration.setStorageID(getStorageId()); //same as DN - bpRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION; + bpRegistration.storageInfo.layoutVersion = HdfsConstants.LAYOUT_VERSION; bpRegistration.storageInfo.namespaceID = bpNSInfo.namespaceID; bpRegistration.storageInfo.clusterID = bpNSInfo.clusterID; } else { @@ -1162,9 +1162,9 @@ void register() throws IOException { throw new IncorrectVersionException(nsBuildVer, "namenode", stBuildVer); } - if (FSConstants.LAYOUT_VERSION != bpNSInfo.getLayoutVersion()) { + if (HdfsConstants.LAYOUT_VERSION != bpNSInfo.getLayoutVersion()) { LOG.warn("Data-node and name-node layout versions must be " + - "the same. Expected: "+ FSConstants.LAYOUT_VERSION + + "the same. Expected: "+ HdfsConstants.LAYOUT_VERSION + " actual "+ bpNSInfo.getLayoutVersion()); throw new IncorrectVersionException (bpNSInfo.getLayoutVersion(), "namenode"); @@ -1995,10 +1995,10 @@ public void run() { sock.setSoTimeout(targets.length * socketTimeout); long writeTimeout = socketWriteTimeout + - HdfsConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1); + HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1); OutputStream baseStream = NetUtils.getOutputStream(sock, writeTimeout); out = new DataOutputStream(new BufferedOutputStream(baseStream, - FSConstants.SMALL_BUFFER_SIZE)); + HdfsConstants.SMALL_BUFFER_SIZE)); blockSender = new BlockSender(b, 0, b.getNumBytes(), false, false, false, DataNode.this); DatanodeInfo srcNode = new DatanodeInfo(bpReg); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java index 784ab949ec..488c0188c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java @@ -43,15 +43,15 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.Daemon; @@ -137,8 +137,8 @@ synchronized void recoverTransitionRead(DataNode datanode, // DN storage has been initialized, no need to do anything return; } - assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() : - "Data-node 
version " + FSConstants.LAYOUT_VERSION + + assert HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() : + "Data-node version " + HdfsConstants.LAYOUT_VERSION + " and name-node layout version " + nsInfo.getLayoutVersion() + " must be the same."; @@ -268,7 +268,7 @@ static void makeBlockPoolDataDir(Collection dataDirs, void format(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException { sd.clearDirectory(); // create directory - this.layoutVersion = FSConstants.LAYOUT_VERSION; + this.layoutVersion = HdfsConstants.LAYOUT_VERSION; this.clusterID = nsInfo.getClusterID(); this.namespaceID = nsInfo.getNamespaceID(); this.cTime = 0; @@ -374,7 +374,7 @@ private void doTransition( DataNode datanode, } readProperties(sd); checkVersionUpgradable(this.layoutVersion); - assert this.layoutVersion >= FSConstants.LAYOUT_VERSION : + assert this.layoutVersion >= HdfsConstants.LAYOUT_VERSION : "Future version is not allowed"; boolean federationSupported = @@ -397,7 +397,7 @@ private void doTransition( DataNode datanode, } // regular start up - if (this.layoutVersion == FSConstants.LAYOUT_VERSION + if (this.layoutVersion == HdfsConstants.LAYOUT_VERSION && this.cTime == nsInfo.getCTime()) return; // regular startup // verify necessity of a distributed upgrade @@ -406,7 +406,7 @@ private void doTransition( DataNode datanode, verifyDistributedUpgradeProgress(um, nsInfo); // do upgrade - if (this.layoutVersion > FSConstants.LAYOUT_VERSION + if (this.layoutVersion > HdfsConstants.LAYOUT_VERSION || this.cTime < nsInfo.getCTime()) { doUpgrade(sd, nsInfo); // upgrade return; @@ -482,7 +482,7 @@ void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException { linkAllBlocks(tmpDir, new File(curBpDir, STORAGE_DIR_CURRENT)); // 4. Write version file under /current - layoutVersion = FSConstants.LAYOUT_VERSION; + layoutVersion = HdfsConstants.LAYOUT_VERSION; clusterID = nsInfo.getClusterID(); writeProperties(sd); @@ -542,7 +542,7 @@ void doRollback( StorageDirectory sd, // We allow rollback to a state, which is either consistent with // the namespace state or can be further upgraded to it. 
- if (!(prevInfo.getLayoutVersion() >= FSConstants.LAYOUT_VERSION + if (!(prevInfo.getLayoutVersion() >= HdfsConstants.LAYOUT_VERSION && prevInfo.getCTime() <= nsInfo.getCTime())) // cannot rollback throw new InconsistentFSStateException(sd.getRoot(), "Cannot rollback to a newer state.\nDatanode previous state: LV = " diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java index 374d309504..8d7d95f8aa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil; import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; import org.apache.hadoop.hdfs.protocol.datatransfer.Op; @@ -53,7 +53,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.io.IOUtils; @@ -86,7 +86,7 @@ class DataXceiver extends Receiver implements Runnable { public DataXceiver(Socket s, DataNode datanode, DataXceiverServer dataXceiverServer) throws IOException { super(new DataInputStream(new BufferedInputStream( - NetUtils.getInputStream(s), FSConstants.SMALL_BUFFER_SIZE))); + NetUtils.getInputStream(s), HdfsConstants.SMALL_BUFFER_SIZE))); this.s = s; this.isLocal = s.getInetAddress().equals(s.getLocalAddress()); @@ -203,7 +203,7 @@ public void readBlock(final ExtendedBlock block, OutputStream baseStream = NetUtils.getOutputStream(s, datanode.socketWriteTimeout); DataOutputStream out = new DataOutputStream(new BufferedOutputStream( - baseStream, FSConstants.SMALL_BUFFER_SIZE)); + baseStream, HdfsConstants.SMALL_BUFFER_SIZE)); checkAccess(out, true, block, blockToken, Op.READ_BLOCK, BlockTokenSecretManager.AccessMode.READ); @@ -329,7 +329,7 @@ public void writeBlock(final ExtendedBlock block, final DataOutputStream replyOut = new DataOutputStream( new BufferedOutputStream( NetUtils.getOutputStream(s, datanode.socketWriteTimeout), - FSConstants.SMALL_BUFFER_SIZE)); + HdfsConstants.SMALL_BUFFER_SIZE)); checkAccess(replyOut, isClient, block, blockToken, Op.WRITE_BLOCK, BlockTokenSecretManager.AccessMode.WRITE); @@ -364,16 +364,16 @@ public void writeBlock(final ExtendedBlock block, mirrorSock = datanode.newSocket(); try { int timeoutValue = datanode.socketTimeout - + (HdfsConstants.READ_TIMEOUT_EXTENSION * targets.length); + + (HdfsServerConstants.READ_TIMEOUT_EXTENSION * targets.length); int writeTimeout = datanode.socketWriteTimeout + - (HdfsConstants.WRITE_TIMEOUT_EXTENSION * targets.length); + (HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * targets.length); NetUtils.connect(mirrorSock, 
mirrorTarget, timeoutValue); mirrorSock.setSoTimeout(timeoutValue); - mirrorSock.setSendBufferSize(FSConstants.DEFAULT_DATA_SOCKET_SIZE); + mirrorSock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE); mirrorOut = new DataOutputStream( new BufferedOutputStream( NetUtils.getOutputStream(mirrorSock, writeTimeout), - FSConstants.SMALL_BUFFER_SIZE)); + HdfsConstants.SMALL_BUFFER_SIZE)); mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock)); new Sender(mirrorOut).writeBlock(originalBlock, blockToken, @@ -524,7 +524,7 @@ public void blockChecksum(final ExtendedBlock block, final MetaDataInputStream metadataIn = datanode.data.getMetaDataInputStream(block); final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream( - metadataIn, FSConstants.IO_FILE_BUFFER_SIZE)); + metadataIn, HdfsConstants.IO_FILE_BUFFER_SIZE)); updateCurrentThreadName("Getting checksum for block " + block); try { @@ -603,7 +603,7 @@ public void copyBlock(final ExtendedBlock block, OutputStream baseStream = NetUtils.getOutputStream( s, datanode.socketWriteTimeout); reply = new DataOutputStream(new BufferedOutputStream( - baseStream, FSConstants.SMALL_BUFFER_SIZE)); + baseStream, HdfsConstants.SMALL_BUFFER_SIZE)); // send status first writeResponse(SUCCESS, reply); @@ -682,14 +682,14 @@ public void replaceBlock(final ExtendedBlock block, OutputStream baseStream = NetUtils.getOutputStream(proxySock, datanode.socketWriteTimeout); proxyOut = new DataOutputStream(new BufferedOutputStream(baseStream, - FSConstants.SMALL_BUFFER_SIZE)); + HdfsConstants.SMALL_BUFFER_SIZE)); /* send request to the proxy */ new Sender(proxyOut).copyBlock(block, blockToken); // receive the response from the proxy proxyReply = new DataInputStream(new BufferedInputStream( - NetUtils.getInputStream(proxySock), FSConstants.IO_FILE_BUFFER_SIZE)); + NetUtils.getInputStream(proxySock), HdfsConstants.IO_FILE_BUFFER_SIZE)); BlockOpResponseProto copyResponse = BlockOpResponseProto.parseFrom( HdfsProtoUtil.vintPrefixed(proxyReply)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java index 89928a2971..f192747db5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java @@ -30,7 +30,7 @@ import org.apache.commons.logging.Log; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.balancer.Balancer; import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.io.IOUtils; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java index 8348c8f9c9..5ecdca7b79 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java @@ -53,10 +53,10 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import 
org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; import org.apache.hadoop.hdfs.server.common.GenerationStamp; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; @@ -465,7 +465,7 @@ private long validateIntegrity(File blockFile, long genStamp) { } checksumIn = new DataInputStream( new BufferedInputStream(new FileInputStream(metaFile), - FSConstants.IO_FILE_BUFFER_SIZE)); + HdfsConstants.IO_FILE_BUFFER_SIZE)); // read and handle the common header here. For now just a version BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java index 76b0bba209..d0fc32c769 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java @@ -20,7 +20,7 @@ import java.io.File; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/Replica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/Replica.java index d2ab20e914..bd0485394a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/Replica.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/Replica.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; /** * This represents block replicas which are stored in DataNode. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java index 921437df20..d2a6f46c2e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java @@ -20,7 +20,7 @@ import java.io.File; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; /** This class represents replicas being written. 
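The replica classes above now import ReplicaState from HdfsServerConstants instead of the old server-side HdfsConstants. A trivial sketch of the updated reference, assuming ReplicaState remains the same nested enum (only its enclosing class name moved):

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;

public class ReplicaStateSketch {
  public static void main(String[] args) {
    // Enum members are untouched by this patch; only the enclosing class was renamed.
    for (ReplicaState state : ReplicaState.values()) {
      System.out.println(state);  // e.g. FINALIZED, RBW, ...
    }
  }
}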
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java index d246f6f8dc..447b9337ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java @@ -23,7 +23,7 @@ import java.io.RandomAccessFile; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams; import org.apache.hadoop.io.IOUtils; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java index c2cb5cfc40..972353962c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java @@ -19,7 +19,7 @@ import java.io.File; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java index 86bef1ea38..91045b7ea5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java @@ -20,7 +20,7 @@ import java.io.File; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java index d92b5913da..c6744f9317 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java @@ -27,7 +27,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.http.HttpServer; import org.mortbay.jetty.nio.SelectChannelConnector; @@ -71,7 +71,7 @@ public void init(DaemonContext context) throws Exception { // Obtain secure port for data 
streaming to datanode InetSocketAddress socAddr = DataNode.getStreamingAddr(conf); int socketWriteTimeout = conf.getInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, - HdfsConstants.WRITE_TIMEOUT); + HdfsServerConstants.WRITE_TIMEOUT); ServerSocket ss = (socketWriteTimeout > 0) ? ServerSocketChannel.open().socket() : new ServerSocket(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java index 5fc2f2b5d6..478fb5660d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java @@ -19,8 +19,8 @@ import java.io.IOException; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.UpgradeManager; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; @@ -47,8 +47,8 @@ class UpgradeManagerDatanode extends UpgradeManager { this.bpid = bpid; } - public HdfsConstants.NodeType getType() { - return HdfsConstants.NodeType.DATA_NODE; + public HdfsServerConstants.NodeType getType() { + return HdfsServerConstants.NodeType.DATA_NODE; } synchronized void initializeUpgrade(NamespaceInfo nsInfo) throws IOException { @@ -57,7 +57,7 @@ synchronized void initializeUpgrade(NamespaceInfo nsInfo) throws IOException { DataNode.LOG.info("\n Distributed upgrade for DataNode " + dataNode.getMachineName() + " version " + getUpgradeVersion() + " to current LV " - + FSConstants.LAYOUT_VERSION + " is initialized."); + + HdfsConstants.LAYOUT_VERSION + " is initialized."); UpgradeObjectDatanode curUO = (UpgradeObjectDatanode)currentUpgrades.first(); curUO.setDatanode(dataNode, this.bpid); upgradeState = curUO.preUpgradeAction(nsInfo); @@ -102,7 +102,7 @@ public synchronized boolean startUpgrade() throws IOException { if(currentUpgrades == null) { DataNode.LOG.info("\n Distributed upgrade for DataNode version " + getUpgradeVersion() + " to current LV " - + FSConstants.LAYOUT_VERSION + " cannot be started. " + + HdfsConstants.LAYOUT_VERSION + " cannot be started. " + "The upgrade object is not defined."); return false; } @@ -115,7 +115,7 @@ public synchronized boolean startUpgrade() throws IOException { DataNode.LOG.info("\n Distributed upgrade for DataNode " + dataNode.getMachineName() + " version " + getUpgradeVersion() + " to current LV " - + FSConstants.LAYOUT_VERSION + " is started."); + + HdfsConstants.LAYOUT_VERSION + " is started."); return true; } @@ -130,7 +130,7 @@ synchronized void processUpgradeCommand(UpgradeCommand command throw new IOException( "Distributed upgrade for DataNode " + dataNode.getMachineName() + " version " + getUpgradeVersion() + " to current LV " - + FSConstants.LAYOUT_VERSION + " cannot be started. " + + HdfsConstants.LAYOUT_VERSION + " cannot be started. 
" + "The upgrade object is not defined."); } @@ -145,7 +145,7 @@ public synchronized void completeUpgrade() throws IOException { DataNode.LOG.info("\n Distributed upgrade for DataNode " + dataNode.getMachineName() + " version " + getUpgradeVersion() + " to current LV " - + FSConstants.LAYOUT_VERSION + " is complete."); + + HdfsConstants.LAYOUT_VERSION + " is complete."); } synchronized void shutdownUpgrade() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java index 9e51f230f9..ddb1d6029f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs.server.datanode; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.UpgradeObject; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; @@ -36,8 +36,8 @@ public abstract class UpgradeObjectDatanode extends UpgradeObject implements Run private DataNode dataNode = null; private String bpid = null; - public HdfsConstants.NodeType getType() { - return HdfsConstants.NodeType.DATA_NODE; + public HdfsServerConstants.NodeType getType() { + return HdfsServerConstants.NodeType.DATA_NODE; } protected DataNode getDatanode() { @@ -118,7 +118,7 @@ public void run() { if(getUpgradeStatus() < 100) { DataNode.LOG.info("\n Distributed upgrade for DataNode version " + getVersion() + " to current LV " - + FSConstants.LAYOUT_VERSION + " cannot be completed."); + + HdfsConstants.LAYOUT_VERSION + " cannot be completed."); } // Complete the upgrade by calling the manager method diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java index d72509cee2..dd68261253 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java @@ -20,20 +20,21 @@ import java.io.BufferedInputStream; import java.io.DataInputStream; import java.io.IOException; +import java.util.Collection; import java.util.Iterator; +import java.util.List; import java.util.zip.Checksum; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.common.Storage.StorageState; -import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.LogLoadPlan; -import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.StringUtils; import 
com.google.common.base.Preconditions; +import com.google.common.collect.Lists; /** * Extension of FSImage for the backup node. @@ -81,6 +82,8 @@ static enum BNState { * {@see #freezeNamespaceAtNextRoll()} */ private boolean stopApplyingEditsOnNextRoll = false; + + private FSNamesystem namesystem; /** * Construct a backup image. @@ -92,6 +95,10 @@ static enum BNState { storage.setDisablePreUpgradableLayoutCheck(true); bnState = BNState.DROP_UNTIL_NEXT_ROLL; } + + void setNamesystem(FSNamesystem fsn) { + this.namesystem = fsn; + } /** * Analyze backup storage directories for consistency.
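The BackupImage hunks above introduce an explicitly injected FSNamesystem: the image no longer reaches back through getFSNamesystem(), the owning node calls setNamesystem() once, and later operations such as saveCheckpoint() use the injected reference. A simplified sketch of that wiring, with placeholder types rather than the real FSNamesystem/BackupImage classes:

// Placeholder types; not the real FSNamesystem/BackupImage.
class Namesystem {
  void persistTo(String location) { /* write the namespace state ... */ }
}

class BackupImageSketch {
  private Namesystem namesystem;

  // The owning node injects the namesystem once it exists.
  void setNamesystem(Namesystem ns) {
    this.namesystem = ns;
  }

  // Operations that previously called getFSNamesystem() now use the injected field.
  void saveCheckpoint() {
    saveNamespace(namesystem);
  }

  private void saveNamespace(Namesystem ns) {
    ns.persistTo("current/");
  }
}

In the patch itself, BackupNode.loadNamesystem() performs the injection right after constructing the FSNamesystem, as shown in the BackupNode.java hunks further down.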
@@ -106,7 +113,7 @@ void recoverCreateRead() throws IOException { StorageDirectory sd = it.next(); StorageState curState; try { - curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR, storage); + curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage); // sd is locked but not opened switch(curState) { case NON_EXISTENT: @@ -140,7 +147,7 @@ void recoverCreateRead() throws IOException { * and create empty edits. */ void saveCheckpoint() throws IOException { - saveNamespace(); + saveNamespace(namesystem); } /** @@ -223,7 +230,7 @@ private synchronized void applyEdits(long firstTxId, int numTxns, byte[] data) } lastAppliedTxId += numTxns; - getFSNamesystem().dir.updateCountForINodeWithQuota(); // inefficient! + namesystem.dir.updateCountForINodeWithQuota(); // inefficient! } finally { backupInputStream.clear(); } @@ -261,11 +268,18 @@ private boolean tryConvergeJournalSpool() throws IOException { new FSImageTransactionalStorageInspector(); storage.inspectStorageDirs(inspector); - LogLoadPlan logLoadPlan = inspector.createLogLoadPlan(lastAppliedTxId, - target - 1); - - logLoadPlan.doRecovery(); - loadEdits(logLoadPlan.getEditsFiles()); + + editLog.recoverUnclosedStreams(); + Iterable editStreamsAll + = editLog.selectInputStreams(lastAppliedTxId, target - 1); + // remove inprogress + List editStreams = Lists.newArrayList(); + for (EditLogInputStream s : editStreamsAll) { + if (s.getFirstTxId() != editLog.getCurSegmentTxId()) { + editStreams.add(s); + } + } + loadEdits(editStreams, namesystem); } // now, need to load the in-progress file @@ -275,7 +289,24 @@ private boolean tryConvergeJournalSpool() throws IOException { return false; // drop lock and try again to load local logs } - EditLogInputStream stream = getEditLog().getInProgressFileInputStream(); + EditLogInputStream stream = null; + Collection editStreams + = getEditLog().selectInputStreams( + getEditLog().getCurSegmentTxId(), + getEditLog().getCurSegmentTxId()); + + for (EditLogInputStream s : editStreams) { + if (s.getFirstTxId() == getEditLog().getCurSegmentTxId()) { + stream = s; + } + break; + } + if (stream == null) { + LOG.warn("Unable to find stream starting with " + editLog.getCurSegmentTxId() + + ". This indicates that there is an error in synchronization in BackupImage"); + return false; + } + try { long remainingTxns = getEditLog().getLastWrittenTxId() - lastAppliedTxId; @@ -289,7 +320,7 @@ private boolean tryConvergeJournalSpool() throws IOException { "expected to load " + remainingTxns + " but loaded " + numLoaded + " from " + stream; } finally { - IOUtils.closeStream(stream); + FSEditLog.closeAllStreams(editStreams); } LOG.info("Successfully synced BackupNode with NameNode at txnid " + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java index 3cac6676f1..6976620341 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java @@ -58,12 +58,31 @@ public void purgeLogsOlderThan(long minTxIdToKeep) throws IOException { } + @Override + public long getNumberOfTransactions(long fromTxnId) + throws IOException, CorruptionException { + // This JournalManager is never used for input. 
Therefore it cannot + // return any transactions + return 0; + } + + @Override + public EditLogInputStream getInputStream(long fromTxnId) throws IOException { + // This JournalManager is never used for input. Therefore it cannot + // return any transactions + throw new IOException("Unsupported operation"); + } + + @Override + public void recoverUnfinalizedSegments() throws IOException { + } + public boolean matchesRegistration(NamenodeRegistration bnReg) { return bnReg.getAddress().equals(this.bnReg.getAddress()); } @Override - public EditLogInputStream getInProgressInputStream(long segmentStartsAtTxId) { - return null; + public String toString() { + return "BackupJournalManager"; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java index 25667b65a2..d8f68a0aaa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java @@ -26,8 +26,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.protocol.JournalProtocol; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; @@ -52,7 +52,7 @@ * */ @InterfaceAudience.Private -public class BackupNode extends NameNode implements JournalProtocol { +public class BackupNode extends NameNode { private static final String BN_ADDRESS_NAME_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY; private static final String BN_ADDRESS_DEFAULT = DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_DEFAULT; private static final String BN_HTTP_ADDRESS_NAME_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY; @@ -95,18 +95,20 @@ protected InetSocketAddress getServiceRpcServerAddress(Configuration conf) throw } @Override // NameNode - protected void setRpcServerAddress(Configuration conf) { - conf.set(BN_ADDRESS_NAME_KEY, getHostPortString(rpcAddress)); + protected void setRpcServerAddress(Configuration conf, + InetSocketAddress addr) { + conf.set(BN_ADDRESS_NAME_KEY, getHostPortString(addr)); } @Override // Namenode - protected void setRpcServiceServerAddress(Configuration conf) { - conf.set(BN_SERVICE_RPC_ADDRESS_KEY, getHostPortString(serviceRPCAddress)); + protected void setRpcServiceServerAddress(Configuration conf, + InetSocketAddress addr) { + conf.set(BN_SERVICE_RPC_ADDRESS_KEY, getHostPortString(addr)); } @Override // NameNode protected InetSocketAddress getHttpServerAddress(Configuration conf) { - assert rpcAddress != null : "rpcAddress should be calculated first"; + assert getNameNodeAddress() != null : "rpcAddress should be calculated first"; String addr = conf.get(BN_HTTP_ADDRESS_NAME_KEY, BN_HTTP_ADDRESS_DEFAULT); return NetUtils.createSocketAddr(addr); } @@ -120,6 +122,7 @@ protected void setHttpServerAddress(Configuration conf){ protected void loadNamesystem(Configuration conf) throws IOException { BackupImage bnImage = new BackupImage(conf); this.namesystem = new 
FSNamesystem(conf, bnImage); + bnImage.setNamesystem(namesystem); bnImage.recoverCreateRead(); } @@ -134,7 +137,7 @@ protected void initialize(Configuration conf) throws IOException { // Backup node should never do lease recovery, // therefore lease hard limit should never expire. namesystem.leaseManager.setLeasePeriod( - FSConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE); + HdfsConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE); clusterId = nsInfo.getClusterID(); blockPoolId = nsInfo.getBlockPoolID(); @@ -145,6 +148,12 @@ protected void initialize(Configuration conf) throws IOException { runCheckpointDaemon(conf); } + @Override + protected NameNodeRpcServer createRpcServer(Configuration conf) + throws IOException { + return new BackupNodeRpcServer(conf, this); + } + @Override // NameNode public void stop() { if(checkpointManager != null) { @@ -177,48 +186,58 @@ public void stop() { super.stop(); } - - @Override - public long getProtocolVersion(String protocol, long clientVersion) - throws IOException { - if (protocol.equals(JournalProtocol.class.getName())) { - return JournalProtocol.versionID; - } else { - return super.getProtocolVersion(protocol, clientVersion); + static class BackupNodeRpcServer extends NameNodeRpcServer implements JournalProtocol { + private final String nnRpcAddress; + + private BackupNodeRpcServer(Configuration conf, BackupNode nn) + throws IOException { + super(conf, nn); + this.server.addProtocol(JournalProtocol.class, this); + nnRpcAddress = nn.nnRpcAddress; + } + + @Override + public long getProtocolVersion(String protocol, long clientVersion) + throws IOException { + if (protocol.equals(JournalProtocol.class.getName())) { + return JournalProtocol.versionID; + } else { + return super.getProtocolVersion(protocol, clientVersion); + } + } + + ///////////////////////////////////////////////////// + // BackupNodeProtocol implementation for backup node. + ///////////////////////////////////////////////////// + @Override + public void startLogSegment(NamenodeRegistration registration, long txid) + throws IOException { + nn.checkOperation(OperationCategory.JOURNAL); + verifyRequest(registration); + verifyRequest(registration); + + getBNImage().namenodeStartedLogSegment(txid); + } + + @Override + public void journal(NamenodeRegistration nnReg, + long firstTxId, int numTxns, + byte[] records) throws IOException { + nn.checkOperation(OperationCategory.JOURNAL); + verifyRequest(nnReg); + if(!nnRpcAddress.equals(nnReg.getAddress())) + throw new IOException("Journal request from unexpected name-node: " + + nnReg.getAddress() + " expecting " + nnRpcAddress); + getBNImage().journal(firstTxId, numTxns, records); + } + + private BackupImage getBNImage() { + return (BackupImage)nn.getFSImage(); } } - - ///////////////////////////////////////////////////// - // BackupNodeProtocol implementation for backup node. 
- ///////////////////////////////////////////////////// - - @Override - public void journal(NamenodeRegistration nnReg, - long firstTxId, int numTxns, - byte[] records) throws IOException { - checkOperation(OperationCategory.JOURNAL); - verifyRequest(nnReg); - if(!nnRpcAddress.equals(nnReg.getAddress())) - throw new IOException("Journal request from unexpected name-node: " - + nnReg.getAddress() + " expecting " + nnRpcAddress); - getBNImage().journal(firstTxId, numTxns, records); - } - - @Override - public void startLogSegment(NamenodeRegistration registration, long txid) - throws IOException { - checkOperation(OperationCategory.JOURNAL); - verifyRequest(registration); - getBNImage().namenodeStartedLogSegment(txid); - } - ////////////////////////////////////////////////////// - - BackupImage getBNImage() { - return (BackupImage)getFSImage(); - } boolean shouldCheckpointAtStartup() { FSImage fsImage = getFSImage(); @@ -330,9 +349,9 @@ private static NamespaceInfo handshake(NamenodeProtocol namenode) LOG.fatal(errorMsg); throw new IOException(errorMsg); } - assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() : + assert HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() : "Active and backup node layout versions must be the same. Expected: " - + FSConstants.LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion(); + + HdfsConstants.LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion(); return nsInfo; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CancelDelegationTokenServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CancelDelegationTokenServlet.java index e4de6345b6..2a41aeeb9b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CancelDelegationTokenServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CancelDelegationTokenServlet.java @@ -69,7 +69,7 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res try { ugi.doAs(new PrivilegedExceptionAction() { public Void run() throws Exception { - nn.cancelDelegationToken(token); + nn.getRpcServer().cancelDelegationToken(token); return null; } }); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java index f75410031d..5e544c6695 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java @@ -30,7 +30,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand; import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; @@ -224,7 +224,7 @@ void doCheckpoint() throws IOException { LOG.info("Loading image with txid " + sig.mostRecentCheckpointTxId); File file = bnStorage.findImageFile(sig.mostRecentCheckpointTxId); - bnImage.reloadFromImageFile(file); + bnImage.reloadFromImageFile(file, backupNode.getNamesystem()); } lastApplied = 
bnImage.getLastAppliedTxId(); @@ -238,11 +238,11 @@ void doCheckpoint() throws IOException { backupNode.nnHttpAddress, log, bnStorage); } - rollForwardByApplyingLogs(manifest, bnImage); + rollForwardByApplyingLogs(manifest, bnImage, backupNode.getNamesystem()); } long txid = bnImage.getLastAppliedTxId(); - bnImage.saveFSImageInAllDirs(txid); + bnImage.saveFSImageInAllDirs(backupNode.getNamesystem(), txid); bnStorage.writeAll(); if(cpCmd.needToReturnImage()) { @@ -272,19 +272,21 @@ private InetSocketAddress getImageListenAddress() { static void rollForwardByApplyingLogs( RemoteEditLogManifest manifest, - FSImage dstImage) throws IOException { + FSImage dstImage, + FSNamesystem dstNamesystem) throws IOException { NNStorage dstStorage = dstImage.getStorage(); - List editsFiles = Lists.newArrayList(); + List editsStreams = Lists.newArrayList(); for (RemoteEditLog log : manifest.getLogs()) { File f = dstStorage.findFinalizedEditsFile( log.getStartTxId(), log.getEndTxId()); if (log.getStartTxId() > dstImage.getLastAppliedTxId()) { - editsFiles.add(f); - } + editsStreams.add(new EditLogFileInputStream(f, log.getStartTxId(), + log.getEndTxId())); + } } LOG.info("Checkpointer about to load edits from " + - editsFiles.size() + " file(s)."); - dstImage.loadEdits(editsFiles); + editsStreams.size() + " stream(s)."); + dstImage.loadEdits(editsStreams, dstNamesystem); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java index ea0f392a3d..1c8253f665 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java @@ -73,7 +73,7 @@ protected ClientProtocol createNameNodeProxy() throws IOException { // rpc NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context); if (nn != null) { - return nn; + return nn.getRpcServer(); } InetSocketAddress nnAddr = NameNodeHttpServer.getNameNodeAddressFromContext(context); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java index 8921bc0c55..974697d927 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java @@ -21,6 +21,7 @@ import java.io.ByteArrayInputStream; import java.io.IOException; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import com.google.common.base.Preconditions; /** @@ -122,4 +123,14 @@ void clear() throws IOException { reader = null; this.version = 0; } + + @Override + public long getFirstTxId() throws IOException { + return HdfsConstants.INVALID_TXID; + } + + @Override + public long getLastTxId() throws IOException { + return HdfsConstants.INVALID_TXID; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java index 532b2f2dcf..9db7f8ae66 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java @@ -24,10 +24,11 @@ import java.io.BufferedInputStream; import java.io.EOFException; import java.io.DataInputStream; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import com.google.common.annotations.VisibleForTesting; @@ -38,12 +39,15 @@ class EditLogFileInputStream extends EditLogInputStream { private final File file; private final FileInputStream fStream; + final private long firstTxId; + final private long lastTxId; private final int logVersion; private final FSEditLogOp.Reader reader; private final FSEditLogLoader.PositionTrackingInputStream tracker; /** * Open an EditLogInputStream for the given file. + * The file is pretransactional, so has no txids * @param name filename to open * @throws LogHeaderCorruptException if the header is either missing or * appears to be corrupt/truncated @@ -52,6 +56,21 @@ class EditLogFileInputStream extends EditLogInputStream { */ EditLogFileInputStream(File name) throws LogHeaderCorruptException, IOException { + this(name, HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID); + } + + /** + * Open an EditLogInputStream for the given file. + * @param name filename to open + * @param firstTxId first transaction found in file + * @param lastTxId last transaction id found in file + * @throws LogHeaderCorruptException if the header is either missing or + * appears to be corrupt/truncated + * @throws IOException if an actual IO error occurs while reading the + * header + */ + EditLogFileInputStream(File name, long firstTxId, long lastTxId) + throws LogHeaderCorruptException, IOException { file = name; fStream = new FileInputStream(name); @@ -66,6 +85,18 @@ class EditLogFileInputStream extends EditLogInputStream { } reader = new FSEditLogOp.Reader(in, logVersion); + this.firstTxId = firstTxId; + this.lastTxId = lastTxId; + } + + @Override + public long getFirstTxId() throws IOException { + return firstTxId; + } + + @Override + public long getLastTxId() throws IOException { + return lastTxId; } @Override // JournalStream @@ -117,7 +148,8 @@ static FSEditLogLoader.EditLogValidation validateEditLog(File file) throws IOExc // If it's missing its header, this is equivalent to no transactions FSImage.LOG.warn("Log at " + file + " has no valid header", corrupt); - return new FSEditLogLoader.EditLogValidation(0, 0); + return new FSEditLogLoader.EditLogValidation(0, HdfsConstants.INVALID_TXID, + HdfsConstants.INVALID_TXID); } try { @@ -143,11 +175,11 @@ static int readLogVersion(DataInputStream in) throw new LogHeaderCorruptException( "Reached EOF when reading log header"); } - if (logVersion < FSConstants.LAYOUT_VERSION) { // future version + if (logVersion < HdfsConstants.LAYOUT_VERSION) { // future version throw new LogHeaderCorruptException( "Unexpected version of the file system log file: " + logVersion + ". 
Current version = " - + FSConstants.LAYOUT_VERSION + "."); + + HdfsConstants.LAYOUT_VERSION + "."); } assert logVersion <= Storage.LAST_UPGRADABLE_LAYOUT_VERSION : "Unsupported version " + logVersion; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java index f79f44266e..be75f637a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java @@ -27,7 +27,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.io.IOUtils; import com.google.common.annotations.VisibleForTesting; @@ -109,7 +109,7 @@ void writeRaw(byte[] bytes, int offset, int length) throws IOException { void create() throws IOException { fc.truncate(0); fc.position(0); - doubleBuf.getCurrentBuf().writeInt(FSConstants.LAYOUT_VERSION); + doubleBuf.getCurrentBuf().writeInt(HdfsConstants.LAYOUT_VERSION); setReadyToFlush(); flush(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java index 52a3dd4c20..c6f850542f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java @@ -28,6 +28,17 @@ * into the #{@link EditLogOutputStream}. */ abstract class EditLogInputStream implements JournalStream, Closeable { + /** + * @return the first transaction which will be found in this stream + */ + public abstract long getFirstTxId() throws IOException; + + /** + * @return the last transaction which will be found in this stream + */ + public abstract long getLastTxId() throws IOException; + + /** * Close the stream. 
* @throws IOException if an error occurred while closing diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java index 0dd90588f4..5312b145ae 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.io.OutputStream; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.Writer; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.IOUtils; @@ -129,7 +129,7 @@ public TxnBuffer(int initBufferSize) { } public void writeOp(FSEditLogOp op) throws IOException { - if (firstTxId == FSConstants.INVALID_TXID) { + if (firstTxId == HdfsConstants.INVALID_TXID) { firstTxId = op.txid; } else { assert op.txid > firstTxId; @@ -141,7 +141,7 @@ public void writeOp(FSEditLogOp op) throws IOException { @Override public DataOutputBuffer reset() { super.reset(); - firstTxId = FSConstants.INVALID_TXID; + firstTxId = HdfsConstants.INVALID_TXID; numTxns = 0; return this; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 4ad7c7e451..4d7f2b9ca6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DirectoryListing; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.FSLimitException; import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException; import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException; @@ -55,10 +55,11 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.util.ByteArray; +import com.google.common.base.Preconditions; + /************************************************* * FSDirectory stores the filesystem directory state. * It handles writing/loading values to disk, and logging @@ -72,6 +73,7 @@ public class FSDirectory implements Closeable { INodeDirectoryWithQuota rootDir; FSImage fsImage; + private final FSNamesystem namesystem; private volatile boolean ready = false; private static final long UNKNOWN_DISK_SPACE = -1; private final int maxComponentLength; @@ -113,15 +115,9 @@ boolean hasReadLock() { */ private final NameCache nameCache; - /** Access an existing dfs name directory. 
*/ - FSDirectory(FSNamesystem ns, Configuration conf) throws IOException { - this(new FSImage(conf), ns, conf); - } - FSDirectory(FSImage fsImage, FSNamesystem ns, Configuration conf) { this.dirLock = new ReentrantReadWriteLock(true); // fair this.cond = dirLock.writeLock().newCondition(); - fsImage.setFSNamesystem(ns); rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME, ns.createFsOwnerPermissions(new FsPermission((short)0755)), Integer.MAX_VALUE, UNKNOWN_DISK_SPACE); @@ -145,10 +141,11 @@ boolean hasReadLock() { NameNode.LOG.info("Caching file names occuring more than " + threshold + " times "); nameCache = new NameCache(threshold); + namesystem = ns; } private FSNamesystem getFSNamesystem() { - return fsImage.getFSNamesystem(); + return namesystem; } private BlockManager getBlockManager() { @@ -156,33 +153,11 @@ private BlockManager getBlockManager() { } /** - * Load the filesystem image into memory. - * - * @param startOpt Startup type as specified by the user. - * @throws IOException If image or editlog cannot be read. + * Notify that loading of this FSDirectory is complete, and + * it is ready for use */ - void loadFSImage(StartupOption startOpt) - throws IOException { - // format before starting up if requested - if (startOpt == StartupOption.FORMAT) { - fsImage.format(fsImage.getStorage().determineClusterId());// reuse current id - - startOpt = StartupOption.REGULAR; - } - boolean success = false; - try { - if (fsImage.recoverTransitionRead(startOpt)) { - fsImage.saveNamespace(); - } - fsImage.openEditLog(); - - fsImage.setCheckpointDirectories(null, null); - success = true; - } finally { - if (!success) { - fsImage.close(); - } - } + void imageLoadComplete() { + Preconditions.checkState(!ready, "FSDirectory already loaded"); writeLock(); try { setReady(true); @@ -1876,10 +1851,10 @@ INodeDirectory unprotectedSetQuota(String src, long nsQuota, long dsQuota) UnresolvedLinkException { assert hasWriteLock(); // sanity check - if ((nsQuota < 0 && nsQuota != FSConstants.QUOTA_DONT_SET && - nsQuota < FSConstants.QUOTA_RESET) || - (dsQuota < 0 && dsQuota != FSConstants.QUOTA_DONT_SET && - dsQuota < FSConstants.QUOTA_RESET)) { + if ((nsQuota < 0 && nsQuota != HdfsConstants.QUOTA_DONT_SET && + nsQuota < HdfsConstants.QUOTA_RESET) || + (dsQuota < 0 && dsQuota != HdfsConstants.QUOTA_DONT_SET && + dsQuota < HdfsConstants.QUOTA_RESET)) { throw new IllegalArgumentException("Illegal value for nsQuota or " + "dsQuota : " + nsQuota + " and " + dsQuota); @@ -1893,16 +1868,16 @@ INodeDirectory unprotectedSetQuota(String src, long nsQuota, long dsQuota) throw new FileNotFoundException("Directory does not exist: " + srcs); } else if (!targetNode.isDirectory()) { throw new FileNotFoundException("Cannot set quota on a file: " + srcs); - } else if (targetNode.isRoot() && nsQuota == FSConstants.QUOTA_RESET) { + } else if (targetNode.isRoot() && nsQuota == HdfsConstants.QUOTA_RESET) { throw new IllegalArgumentException("Cannot clear namespace quota on root."); } else { // a directory inode INodeDirectory dirNode = (INodeDirectory)targetNode; long oldNsQuota = dirNode.getNsQuota(); long oldDsQuota = dirNode.getDsQuota(); - if (nsQuota == FSConstants.QUOTA_DONT_SET) { + if (nsQuota == HdfsConstants.QUOTA_DONT_SET) { nsQuota = oldNsQuota; } - if (dsQuota == FSConstants.QUOTA_DONT_SET) { + if (dsQuota == HdfsConstants.QUOTA_DONT_SET) { dsQuota = oldDsQuota; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index 495c42e45a..e355a9d838 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; import java.io.IOException; +import java.util.Collection; import java.util.Collections; import java.util.Iterator; import java.util.List; @@ -29,17 +30,19 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import static org.apache.hadoop.hdfs.server.common.Util.now; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; +import org.apache.hadoop.hdfs.server.namenode.JournalManager.CorruptionException; import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.security.token.delegation.DelegationKey; +import org.apache.hadoop.io.IOUtils; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; @@ -91,7 +94,7 @@ private enum State { // the first txid of the log that's currently open for writing. // If this value is N, we are currently writing to edits_inprogress_N - private long curSegmentTxId = FSConstants.INVALID_TXID; + private long curSegmentTxId = HdfsConstants.INVALID_TXID; // the time of printing the statistics to the log file. private long lastPrintTime; @@ -904,7 +907,7 @@ public void purgeLogsOlderThan(final long minTxIdToKeep) { // synchronized to prevent findbugs warning about inconsistent // synchronization. This will be JIT-ed out if asserts are // off. - assert curSegmentTxId == FSConstants.INVALID_TXID || // on format this is no-op + assert curSegmentTxId == HdfsConstants.INVALID_TXID || // on format this is no-op minTxIdToKeep <= curSegmentTxId : "cannot purge logs older than txid " + minTxIdToKeep + " when current segment starts at " + curSegmentTxId; @@ -1068,6 +1071,112 @@ private void disableAndReportErrorOnJournals(List badJournals) } } + /** + * Find the best editlog input stream to read from txid. In this case + * best means the editlog which has the largest continuous range of + * transactions starting from the transaction id, fromTxId. + * + * If a journal throws an CorruptionException while reading from a txn id, + * it means that it has more transactions, but can't find any from fromTxId. + * If this is the case and no other journal has transactions, we should throw + * an exception as it means more transactions exist, we just can't load them. + * + * @param fromTxId Transaction id to start from. 
+ * @return a edit log input stream with tranactions fromTxId + * or null if no more exist + */ + private EditLogInputStream selectStream(long fromTxId) + throws IOException { + JournalManager bestjm = null; + long bestjmNumTxns = 0; + CorruptionException corruption = null; + + for (JournalAndStream jas : journals) { + JournalManager candidate = jas.getManager(); + long candidateNumTxns = 0; + try { + candidateNumTxns = candidate.getNumberOfTransactions(fromTxId); + } catch (CorruptionException ce) { + corruption = ce; + } catch (IOException ioe) { + LOG.warn("Error reading number of transactions from " + candidate); + continue; // error reading disk, just skip + } + + if (candidateNumTxns > bestjmNumTxns) { + bestjm = candidate; + bestjmNumTxns = candidateNumTxns; + } + } + + + if (bestjm == null) { + /** + * If all candidates either threw a CorruptionException or + * found 0 transactions, then a gap exists. + */ + if (corruption != null) { + throw new IOException("Gap exists in logs from " + + fromTxId, corruption); + } else { + return null; + } + } + + return bestjm.getInputStream(fromTxId); + } + + /** + * Run recovery on all journals to recover any unclosed segments + */ + void recoverUnclosedStreams() { + mapJournalsAndReportErrors(new JournalClosure() { + @Override + public void apply(JournalAndStream jas) throws IOException { + jas.manager.recoverUnfinalizedSegments(); + } + }, "recovering unclosed streams"); + } + + /** + * Select a list of input streams to load. + * @param fromTxId first transaction in the selected streams + * @param toAtLeast the selected streams must contain this transaction + */ + Collection selectInputStreams(long fromTxId, long toAtLeastTxId) + throws IOException { + List streams = Lists.newArrayList(); + + boolean gapFound = false; + EditLogInputStream stream = selectStream(fromTxId); + while (stream != null) { + fromTxId = stream.getLastTxId() + 1; + streams.add(stream); + try { + stream = selectStream(fromTxId); + } catch (IOException ioe) { + gapFound = true; + break; + } + } + if (fromTxId <= toAtLeastTxId || gapFound) { + closeAllStreams(streams); + throw new IOException("No non-corrupt logs for txid " + + fromTxId); + } + return streams; + } + + /** + * Close all the streams in a collection + * @param streams The list of streams to close + */ + static void closeAllStreams(Iterable streams) { + for (EditLogInputStream s : streams) { + IOUtils.closeStream(s); + } + } + /** * Container for a JournalManager paired with its currently * active stream. 
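The selectStream()/selectInputStreams() additions above implement a simple selection strategy: ask every journal how many transactions it can serve starting at fromTxId, read from the journal offering the longest contiguous run, then repeat from the transaction after the selected stream's last txid until the required transaction is covered; if no journal can serve fromTxId, close what was collected and fail. The sketch below is a self-contained restatement of that loop with simplified stand-in types (Journal and Stream are not the real JournalManager/EditLogInputStream), and it omits the per-journal CorruptionException handling the real code uses to distinguish a corrupt journal from an empty one.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

// Simplified stand-ins; not the real JournalManager/EditLogInputStream.
interface Journal {
  long transactionsStartingAt(long fromTxId) throws IOException;
  Stream open(long fromTxId) throws IOException;
}

interface Stream {
  long lastTxId();
  void close();
}

class StreamSelector {
  // Collect streams covering fromTxId..toAtLeastTxId, always reading from the
  // journal that offers the longest contiguous run at the current position.
  static List<Stream> select(List<Journal> journals, long fromTxId, long toAtLeastTxId)
      throws IOException {
    List<Stream> selected = new ArrayList<Stream>();
    while (fromTxId <= toAtLeastTxId) {
      Journal best = null;
      long bestCount = 0;
      for (Journal j : journals) {
        long count = j.transactionsStartingAt(fromTxId);
        if (count > bestCount) {
          best = j;
          bestCount = count;
        }
      }
      if (best == null) {              // gap: no journal can serve fromTxId
        for (Stream s : selected) {
          s.close();                   // mirrors FSEditLog.closeAllStreams()
        }
        throw new IOException("No non-corrupt logs for txid " + fromTxId);
      }
      Stream s = best.open(fromTxId);
      selected.add(s);
      fromTxId = s.lastTxId() + 1;     // continue after this stream's last transaction
    }
    return selected;
  }
}

In the real patch the loop lives in selectInputStreams(), which likewise treats a gap before toAtLeastTxId as fatal and closes the already-selected streams before throwing.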
@@ -1078,7 +1187,7 @@ private void disableAndReportErrorOnJournals(List badJournals) static class JournalAndStream { private final JournalManager manager; private EditLogOutputStream stream; - private long segmentStartsAtTxId = FSConstants.INVALID_TXID; + private long segmentStartsAtTxId = HdfsConstants.INVALID_TXID; private JournalAndStream(JournalManager manager) { this.manager = manager; @@ -1110,7 +1219,7 @@ void abort() { LOG.error("Unable to abort stream " + stream, ioe); } stream = null; - segmentStartsAtTxId = FSConstants.INVALID_TXID; + segmentStartsAtTxId = HdfsConstants.INVALID_TXID; } private boolean isActive() { @@ -1137,30 +1246,5 @@ void setCurrentStreamForTests(EditLogOutputStream stream) { JournalManager getManager() { return manager; } - - private EditLogInputStream getInProgressInputStream() throws IOException { - return manager.getInProgressInputStream(segmentStartsAtTxId); - } - } - - /** - * @return an EditLogInputStream that reads from the same log that - * the edit log is currently writing. This is used from the BackupNode - * during edits synchronization. - * @throws IOException if no valid logs are available. - */ - synchronized EditLogInputStream getInProgressFileInputStream() - throws IOException { - for (JournalAndStream jas : journals) { - if (!jas.isActive()) continue; - try { - EditLogInputStream in = jas.getInProgressInputStream(); - if (in != null) return in; - } catch (IOException ioe) { - LOG.warn("Unable to get the in-progress input stream from " + jas, - ioe); - } - } - throw new IOException("No in-progress stream provided edits"); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index db985691f6..991fd08c84 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -27,7 +27,7 @@ import java.util.EnumMap; import org.apache.hadoop.fs.permission.PermissionStatus; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; @@ -274,14 +274,14 @@ int loadEditRecords(int logVersion, EditLogInputStream in, boolean closeOnExit, SetNSQuotaOp setNSQuotaOp = (SetNSQuotaOp)op; fsDir.unprotectedSetQuota(setNSQuotaOp.src, setNSQuotaOp.nsQuota, - FSConstants.QUOTA_DONT_SET); + HdfsConstants.QUOTA_DONT_SET); break; } case OP_CLEAR_NS_QUOTA: { ClearNSQuotaOp clearNSQuotaOp = (ClearNSQuotaOp)op; fsDir.unprotectedSetQuota(clearNSQuotaOp.src, - FSConstants.QUOTA_RESET, - FSConstants.QUOTA_DONT_SET); + HdfsConstants.QUOTA_RESET, + HdfsConstants.QUOTA_DONT_SET); break; } @@ -435,7 +435,7 @@ private void check203UpgradeFailure(int logVersion, IOException ex) // The editlog must be emptied by restarting the namenode, before proceeding // with the upgrade. if (Storage.is203LayoutVersion(logVersion) - && logVersion != FSConstants.LAYOUT_VERSION) { + && logVersion != HdfsConstants.LAYOUT_VERSION) { String msg = "During upgrade failed to load the editlog version " + logVersion + " from release 0.20.203. Please go back to the old " + " release and restart the namenode. 
This empties the editlog " @@ -446,24 +446,6 @@ private void check203UpgradeFailure(int logVersion, IOException ex) } } - static EditLogValidation validateEditLog(File file) throws IOException { - EditLogFileInputStream in; - try { - in = new EditLogFileInputStream(file); - } catch (LogHeaderCorruptException corrupt) { - // If it's missing its header, this is equivalent to no transactions - FSImage.LOG.warn("Log at " + file + " has no valid header", - corrupt); - return new EditLogValidation(0, 0); - } - - try { - return validateEditLog(in); - } finally { - IOUtils.closeStream(in); - } - } - /** * Return the number of valid transactions in the stream. If the stream is * truncated during the header, returns a value indicating that there are @@ -473,12 +455,26 @@ static EditLogValidation validateEditLog(File file) throws IOException { * if the log does not exist) */ static EditLogValidation validateEditLog(EditLogInputStream in) { - long numValid = 0; long lastPos = 0; + long firstTxId = HdfsConstants.INVALID_TXID; + long lastTxId = HdfsConstants.INVALID_TXID; + long numValid = 0; try { + FSEditLogOp op = null; while (true) { lastPos = in.getPosition(); - if (in.readOp() == null) { + if ((op = in.readOp()) == null) { + break; + } + if (firstTxId == HdfsConstants.INVALID_TXID) { + firstTxId = op.txid; + } + if (lastTxId == HdfsConstants.INVALID_TXID + || op.txid == lastTxId + 1) { + lastTxId = op.txid; + } else { + FSImage.LOG.error("Out of order txid found. Found " + op.txid + + ", expected " + (lastTxId + 1)); break; } numValid++; @@ -489,16 +485,33 @@ static EditLogValidation validateEditLog(EditLogInputStream in) { FSImage.LOG.debug("Caught exception after reading " + numValid + " ops from " + in + " while determining its valid length.", t); } - return new EditLogValidation(lastPos, numValid); + return new EditLogValidation(lastPos, firstTxId, lastTxId); } static class EditLogValidation { - long validLength; - long numTransactions; - - EditLogValidation(long validLength, long numTransactions) { + private long validLength; + private long startTxId; + private long endTxId; + + EditLogValidation(long validLength, + long startTxId, long endTxId) { this.validLength = validLength; - this.numTransactions = numTransactions; + this.startTxId = startTxId; + this.endTxId = endTxId; + } + + long getValidLength() { return validLength; } + + long getStartTxId() { return startTxId; } + + long getEndTxId() { return endTxId; } + + long getNumTransactions() { + if (endTxId == HdfsConstants.INVALID_TXID + || startTxId == HdfsConstants.INVALID_TXID) { + return 0; + } + return (endTxId - startTxId) + 1; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index 6529c876c0..25f99b4081 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -30,7 +30,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index 8b259018f1..325e4b04ca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -35,7 +35,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; @@ -44,9 +44,9 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageState; import org.apache.hadoop.hdfs.server.common.Util; import static org.apache.hadoop.hdfs.server.common.Util.now; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; -import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.LoadPlan; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; + import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand; @@ -70,7 +70,6 @@ public class FSImage implements Closeable { protected static final Log LOG = LogFactory.getLog(FSImage.class.getName()); - protected FSNamesystem namesystem = null; protected FSEditLog editLog = null; private boolean isUpgradeFinalized = false; @@ -82,38 +81,20 @@ public class FSImage implements Closeable { */ protected long lastAppliedTxId = 0; - /** - * URIs for importing an image from a checkpoint. In the default case, - * URIs will represent directories. - */ - private Collection checkpointDirs; - private Collection checkpointEditsDirs; - final private Configuration conf; private final NNStorageRetentionManager archivalManager; - /** - * Construct an FSImage. - * @param conf Configuration - * @see #FSImage(Configuration conf, FSNamesystem ns, - * Collection imageDirs, Collection editsDirs) - * @throws IOException if default directories are invalid. - */ - public FSImage(Configuration conf) throws IOException { - this(conf, (FSNamesystem)null); - } /** * Construct an FSImage * @param conf Configuration - * @param ns The FSNamesystem using this image. - * @see #FSImage(Configuration conf, FSNamesystem ns, + * @see #FSImage(Configuration conf, * Collection imageDirs, Collection editsDirs) * @throws IOException if default directories are invalid. */ - private FSImage(Configuration conf, FSNamesystem ns) throws IOException { - this(conf, ns, + protected FSImage(Configuration conf) throws IOException { + this(conf, FSNamesystem.getNamespaceDirs(conf), FSNamesystem.getNamespaceEditsDirs(conf)); } @@ -124,17 +105,14 @@ private FSImage(Configuration conf, FSNamesystem ns) throws IOException { * Setup storage and initialize the edit log. * * @param conf Configuration - * @param ns The FSNamesystem using this image. 
* @param imageDirs Directories the image can be stored in. * @param editsDirs Directories the editlog can be stored in. * @throws IOException if directories are invalid. */ - protected FSImage(Configuration conf, FSNamesystem ns, + protected FSImage(Configuration conf, Collection imageDirs, Collection editsDirs) throws IOException { this.conf = conf; - setCheckpointDirectories(FSImage.getCheckpointDirs(conf, null), - FSImage.getCheckpointEditsDirs(conf, null)); storage = new NNStorage(conf, imageDirs, editsDirs); if(conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, @@ -143,31 +121,18 @@ protected FSImage(Configuration conf, FSNamesystem ns, } this.editLog = new FSEditLog(storage); - setFSNamesystem(ns); archivalManager = new NNStorageRetentionManager(conf, storage, editLog); } - - protected FSNamesystem getFSNamesystem() { - return namesystem; - } - - void setFSNamesystem(FSNamesystem ns) { - namesystem = ns; - if (ns != null) { - storage.setUpgradeManager(ns.upgradeManager); - } - } - void setCheckpointDirectories(Collection dirs, - Collection editsDirs) { - checkpointDirs = dirs; - checkpointEditsDirs = editsDirs; - } - - void format(String clusterId) throws IOException { + void format(FSNamesystem fsn, String clusterId) throws IOException { + long fileCount = fsn.getTotalFiles(); + // Expect 1 file, which is the root inode + Preconditions.checkState(fileCount == 1, + "FSImage.format should be called with an uninitialized namesystem, has " + + fileCount + " files"); storage.format(clusterId); - saveFSImageInAllDirs(0); + saveFSImageInAllDirs(fsn, 0); } /** @@ -179,7 +144,7 @@ void format(String clusterId) throws IOException { * @throws IOException * @return true if the image needs to be saved or false otherwise */ - boolean recoverTransitionRead(StartupOption startOpt) + boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target) throws IOException { assert startOpt != StartupOption.FORMAT : "NameNode formatting should be performed before reading the image"; @@ -187,21 +152,14 @@ boolean recoverTransitionRead(StartupOption startOpt) Collection imageDirs = storage.getImageDirectories(); Collection editsDirs = storage.getEditsDirectories(); + // none of the data dirs exist if((imageDirs.size() == 0 || editsDirs.size() == 0) && startOpt != StartupOption.IMPORT) throw new IOException( "All specified directories are not accessible or do not exist."); - if(startOpt == StartupOption.IMPORT - && (checkpointDirs == null || checkpointDirs.isEmpty())) - throw new IOException("Cannot import image from a checkpoint. " - + "\"dfs.namenode.checkpoint.dir\" is not set." ); - - if(startOpt == StartupOption.IMPORT - && (checkpointEditsDirs == null || checkpointEditsDirs.isEmpty())) - throw new IOException("Cannot import image from a checkpoint. " - + "\"dfs.namenode.checkpoint.dir\" is not set." ); + storage.setUpgradeManager(target.upgradeManager); // 1. For each data directory calculate its state and // check whether all is consistent before transitioning. 
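With FSImage no longer holding an FSNamesystem field, format() and recoverTransitionRead() now take the target namesystem as an explicit parameter, and format() guards against being handed an already-populated namespace (the Preconditions.checkState on the file count). A minimal sketch of that contract, using plain Java and placeholder types instead of the real FSNamesystem and Guava Preconditions:

// Placeholder types; not the real FSNamesystem or Preconditions.
class NamespaceView {
  long totalFiles() { return 1; }   // a freshly created namespace holds only the root inode
}

class ImageFormatter {
  void format(NamespaceView ns, String clusterId) {
    long fileCount = ns.totalFiles();
    if (fileCount != 1) {
      throw new IllegalStateException(
          "format should be called with an uninitialized namesystem, has "
          + fileCount + " files");
    }
    // ... format the storage directories for clusterId and save the initial image
  }
}

The guard matters because format() also writes an initial image via saveFSImageInAllDirs(fsn, 0); presumably it exists to avoid persisting a partially loaded namespace as a fresh checkpoint.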
@@ -227,11 +185,11 @@ boolean recoverTransitionRead(StartupOption startOpt) } if (startOpt != StartupOption.UPGRADE && layoutVersion < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION - && layoutVersion != FSConstants.LAYOUT_VERSION) { + && layoutVersion != HdfsConstants.LAYOUT_VERSION) { throw new IOException( "\nFile system image contains an old layout version " + storage.getLayoutVersion() + ".\nAn upgrade to version " - + FSConstants.LAYOUT_VERSION + " is required.\n" + + HdfsConstants.LAYOUT_VERSION + " is required.\n" + "Please restart NameNode with -upgrade option."); } @@ -261,10 +219,10 @@ boolean recoverTransitionRead(StartupOption startOpt) // 3. Do transitions switch(startOpt) { case UPGRADE: - doUpgrade(); + doUpgrade(target); return false; // upgrade saved image already case IMPORT: - doImportCheckpoint(); + doImportCheckpoint(target); return false; // import checkpoint saved image already case ROLLBACK: doRollback(); @@ -273,7 +231,7 @@ boolean recoverTransitionRead(StartupOption startOpt) // just load the image } - return loadFSImage(); + return loadFSImage(target); } /** @@ -324,11 +282,11 @@ private boolean recoverStorageDirs(StartupOption startOpt, return isFormatted; } - private void doUpgrade() throws IOException { + private void doUpgrade(FSNamesystem target) throws IOException { if(storage.getDistributedUpgradeState()) { // only distributed upgrade need to continue // don't do version upgrade - this.loadFSImage(); + this.loadFSImage(target); storage.initializeDistributedUpgrade(); return; } @@ -343,13 +301,13 @@ private void doUpgrade() throws IOException { } // load the latest image - this.loadFSImage(); + this.loadFSImage(target); // Do upgrade for each directory long oldCTime = storage.getCTime(); storage.cTime = now(); // generate new cTime for the state int oldLV = storage.getLayoutVersion(); - storage.layoutVersion = FSConstants.LAYOUT_VERSION; + storage.layoutVersion = HdfsConstants.LAYOUT_VERSION; List errorSDs = Collections.synchronizedList(new ArrayList()); @@ -385,7 +343,7 @@ private void doUpgrade() throws IOException { storage.reportErrorsOnDirectories(errorSDs); errorSDs.clear(); - saveFSImageInAllDirs(editLog.getLastWrittenTxId()); + saveFSImageInAllDirs(target, editLog.getLastWrittenTxId()); for (Iterator it = storage.dirIterator(); it.hasNext();) { StorageDirectory sd = it.next(); @@ -422,8 +380,8 @@ private void doRollback() throws IOException { // a previous fs states in at least one of the storage directories. // Directories that don't have previous state do not rollback boolean canRollback = false; - FSImage prevState = new FSImage(conf, getFSNamesystem()); - prevState.getStorage().layoutVersion = FSConstants.LAYOUT_VERSION; + FSImage prevState = new FSImage(conf); + prevState.getStorage().layoutVersion = HdfsConstants.LAYOUT_VERSION; for (Iterator it = storage.dirIterator(); it.hasNext();) { StorageDirectory sd = it.next(); File prevDir = sd.getPreviousDir(); @@ -438,12 +396,12 @@ private void doRollback() throws IOException { // read and verify consistency of the prev dir prevState.getStorage().readPreviousVersionProperties(sd); - if (prevState.getLayoutVersion() != FSConstants.LAYOUT_VERSION) { + if (prevState.getLayoutVersion() != HdfsConstants.LAYOUT_VERSION) { throw new IOException( "Cannot rollback to storage version " + prevState.getLayoutVersion() + " using this version of the NameNode, which uses storage version " + - FSConstants.LAYOUT_VERSION + ". " + + HdfsConstants.LAYOUT_VERSION + ". 
" + "Please use the previous version of HDFS to perform the rollback."); } canRollback = true; @@ -504,19 +462,32 @@ private void doFinalize(StorageDirectory sd) throws IOException { /** * Load image from a checkpoint directory and save it into the current one. + * @param target the NameSystem to import into * @throws IOException */ - void doImportCheckpoint() throws IOException { - FSNamesystem fsNamesys = getFSNamesystem(); - FSImage ckptImage = new FSImage(conf, fsNamesys, + void doImportCheckpoint(FSNamesystem target) throws IOException { + Collection checkpointDirs = + FSImage.getCheckpointDirs(conf, null); + Collection checkpointEditsDirs = + FSImage.getCheckpointEditsDirs(conf, null); + + if (checkpointDirs == null || checkpointDirs.isEmpty()) { + throw new IOException("Cannot import image from a checkpoint. " + + "\"dfs.namenode.checkpoint.dir\" is not set." ); + } + + if (checkpointEditsDirs == null || checkpointEditsDirs.isEmpty()) { + throw new IOException("Cannot import image from a checkpoint. " + + "\"dfs.namenode.checkpoint.dir\" is not set." ); + } + + FSImage realImage = target.getFSImage(); + FSImage ckptImage = new FSImage(conf, checkpointDirs, checkpointEditsDirs); - // replace real image with the checkpoint image - FSImage realImage = fsNamesys.getFSImage(); - assert realImage == this; - fsNamesys.dir.fsImage = ckptImage; + target.dir.fsImage = ckptImage; // load from the checkpoint dirs try { - ckptImage.recoverTransitionRead(StartupOption.REGULAR); + ckptImage.recoverTransitionRead(StartupOption.REGULAR, target); } finally { ckptImage.close(); } @@ -524,10 +495,11 @@ void doImportCheckpoint() throws IOException { realImage.getStorage().setStorageInfo(ckptImage.getStorage()); realImage.getEditLog().setNextTxId(ckptImage.getEditLog().getLastWrittenTxId()+1); - fsNamesys.dir.fsImage = realImage; + target.dir.fsImage = realImage; realImage.getStorage().setBlockPoolID(ckptImage.getBlockPoolID()); + // and save it but keep the same checkpointTime - saveNamespace(); + saveNamespace(target); getStorage().writeAll(); } @@ -558,11 +530,11 @@ void openEditLog() throws IOException { * Toss the current image and namesystem, reloading from the specified * file. */ - void reloadFromImageFile(File file) throws IOException { - namesystem.dir.reset(); + void reloadFromImageFile(File file, FSNamesystem target) throws IOException { + target.dir.reset(); LOG.debug("Reloading namespace from " + file); - loadFSImage(file); + loadFSImage(file, target); } /** @@ -580,36 +552,42 @@ void reloadFromImageFile(File file) throws IOException { * @return whether the image should be saved * @throws IOException */ - boolean loadFSImage() throws IOException { + boolean loadFSImage(FSNamesystem target) throws IOException { FSImageStorageInspector inspector = storage.readAndInspectDirs(); isUpgradeFinalized = inspector.isUpgradeFinalized(); - + + FSImageStorageInspector.FSImageFile imageFile + = inspector.getLatestImage(); boolean needToSave = inspector.needToSave(); + + Iterable editStreams = null; + + editLog.recoverUnclosedStreams(); + + if (LayoutVersion.supports(Feature.TXID_BASED_LAYOUT, + getLayoutVersion())) { + editStreams = editLog.selectInputStreams(imageFile.getCheckpointTxId() + 1, + inspector.getMaxSeenTxId()); + } else { + editStreams = FSImagePreTransactionalStorageInspector + .getEditLogStreams(storage); + } + + LOG.debug("Planning to load image :\n" + imageFile); + for (EditLogInputStream l : editStreams) { + LOG.debug("\t Planning to load edit stream: " + l); + } - // Plan our load. 
This will throw if it's impossible to load from the - // data that's available. - LoadPlan loadPlan = inspector.createLoadPlan(); - LOG.debug("Planning to load image using following plan:\n" + loadPlan); - - - // Recover from previous interrupted checkpoint, if any - needToSave |= loadPlan.doRecovery(); - - // - // Load in bits - // - StorageDirectory sdForProperties = - loadPlan.getStorageDirectoryForProperties(); - storage.readProperties(sdForProperties); - File imageFile = loadPlan.getImageFile(); - try { + StorageDirectory sdForProperties = imageFile.sd; + storage.readProperties(sdForProperties); + if (LayoutVersion.supports(Feature.TXID_BASED_LAYOUT, getLayoutVersion())) { // For txid-based layout, we should have a .md5 file // next to the image file - loadFSImage(imageFile); + loadFSImage(imageFile.getFile(), target); } else if (LayoutVersion.supports(Feature.FSIMAGE_CHECKSUM, getLayoutVersion())) { // In 0.22, we have the checksum stored in the VERSION file. @@ -621,17 +599,19 @@ boolean loadFSImage() throws IOException { NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY + " not set for storage directory " + sdForProperties.getRoot()); } - loadFSImage(imageFile, new MD5Hash(md5)); + loadFSImage(imageFile.getFile(), new MD5Hash(md5), target); } else { // We don't have any record of the md5sum - loadFSImage(imageFile, null); + loadFSImage(imageFile.getFile(), null, target); } } catch (IOException ioe) { - throw new IOException("Failed to load image from " + loadPlan.getImageFile(), ioe); + FSEditLog.closeAllStreams(editStreams); + throw new IOException("Failed to load image from " + imageFile, ioe); } - long numLoaded = loadEdits(loadPlan.getEditsFiles()); - needToSave |= needsResaveBasedOnStaleCheckpoint(imageFile, numLoaded); + long numLoaded = loadEdits(editStreams, target); + needToSave |= needsResaveBasedOnStaleCheckpoint(imageFile.getFile(), + numLoaded); // update the txid for the edit log editLog.setNextTxId(storage.getMostRecentCheckpointTxId() + numLoaded + 1); @@ -663,26 +643,30 @@ private boolean needsResaveBasedOnStaleCheckpoint( * Load the specified list of edit files into the image. 
* @return the number of transactions loaded */ - protected long loadEdits(List editLogs) throws IOException { - LOG.debug("About to load edits:\n " + Joiner.on("\n ").join(editLogs)); + protected long loadEdits(Iterable editStreams, + FSNamesystem target) throws IOException { + LOG.debug("About to load edits:\n " + Joiner.on("\n ").join(editStreams)); long startingTxId = getLastAppliedTxId() + 1; - - FSEditLogLoader loader = new FSEditLogLoader(namesystem); int numLoaded = 0; - // Load latest edits - for (File edits : editLogs) { - LOG.debug("Reading " + edits + " expecting start txid #" + startingTxId); - EditLogFileInputStream editIn = new EditLogFileInputStream(edits); - int thisNumLoaded = loader.loadFSEdits(editIn, startingTxId); - startingTxId += thisNumLoaded; - numLoaded += thisNumLoaded; - lastAppliedTxId += thisNumLoaded; - editIn.close(); + + try { + FSEditLogLoader loader = new FSEditLogLoader(target); + + // Load latest edits + for (EditLogInputStream editIn : editStreams) { + LOG.info("Reading " + editIn + " expecting start txid #" + startingTxId); + int thisNumLoaded = loader.loadFSEdits(editIn, startingTxId); + startingTxId += thisNumLoaded; + numLoaded += thisNumLoaded; + lastAppliedTxId += thisNumLoaded; + } + } finally { + FSEditLog.closeAllStreams(editStreams); } // update the counts - getFSNamesystem().dir.updateCountForINodeWithQuota(); + target.dir.updateCountForINodeWithQuota(); return numLoaded; } @@ -691,13 +675,14 @@ protected long loadEdits(List editLogs) throws IOException { * Load the image namespace from the given image file, verifying * it against the MD5 sum stored in its associated .md5 file. */ - private void loadFSImage(File imageFile) throws IOException { + private void loadFSImage(File imageFile, FSNamesystem target) + throws IOException { MD5Hash expectedMD5 = MD5FileUtils.readStoredMd5ForFile(imageFile); if (expectedMD5 == null) { throw new IOException("No MD5 file found corresponding to image file " + imageFile); } - loadFSImage(imageFile, expectedMD5); + loadFSImage(imageFile, expectedMD5, target); } /** @@ -705,11 +690,12 @@ private void loadFSImage(File imageFile) throws IOException { * filenames and blocks. Return whether we should * "re-save" and consolidate the edit-logs */ - private void loadFSImage(File curFile, MD5Hash expectedMd5) throws IOException { + private void loadFSImage(File curFile, MD5Hash expectedMd5, + FSNamesystem target) throws IOException { FSImageFormat.Loader loader = new FSImageFormat.Loader( - conf, getFSNamesystem()); + conf, target); loader.load(curFile); - namesystem.setBlockPoolId(this.getBlockPoolID()); + target.setBlockPoolId(this.getBlockPoolID()); // Check that the image digest we loaded matches up with what // we expected @@ -730,13 +716,14 @@ private void loadFSImage(File curFile, MD5Hash expectedMd5) throws IOException { /** * Save the contents of the FS image to the file. 
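The stream-based replay above is easy to lose in the diff noise; a condensed sketch of the pattern, using the EditLogInputStream, FSEditLogLoader, and FSEditLog.closeAllStreams types as they appear in this patch (the helper name and counter bookkeeping are illustrative, not the exact method body):

// Apply each edit stream in order, then close everything in a finally block
// so a failed replay cannot leak open edit-log streams.
long replayEdits(Iterable<EditLogInputStream> streams, FSNamesystem target,
                 long lastAppliedTxId) throws IOException {
  long startingTxId = lastAppliedTxId + 1;
  long numLoaded = 0;
  try {
    FSEditLogLoader loader = new FSEditLogLoader(target);
    for (EditLogInputStream in : streams) {
      // loadFSEdits applies the transactions to `target` and reports how many
      // it consumed, which advances the expected starting txid for the next stream.
      long applied = loader.loadFSEdits(in, startingTxId);
      startingTxId += applied;
      numLoaded += applied;
    }
  } finally {
    FSEditLog.closeAllStreams(streams);   // as in the patch: close even on error
  }
  return numLoaded;
}

The old File-based loop only closed a stream after a successful loadFSEdits call, so an exception mid-replay could leave the current stream open; routing cleanup through closeAllStreams in a finally block removes that gap.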
*/ - void saveFSImage(StorageDirectory sd, long txid) throws IOException { + void saveFSImage(FSNamesystem source, StorageDirectory sd, long txid) + throws IOException { File newFile = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE_NEW, txid); File dstFile = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, txid); FSImageFormat.Saver saver = new FSImageFormat.Saver(); FSImageCompression compression = FSImageCompression.createCompression(conf); - saver.save(newFile, txid, getFSNamesystem(), compression); + saver.save(newFile, txid, source, compression); MD5FileUtils.saveMD5File(dstFile, saver.getSavedDigest()); storage.setMostRecentCheckpointTxId(txid); @@ -757,8 +744,11 @@ private class FSImageSaver implements Runnable { private StorageDirectory sd; private List errorSDs; private final long txid; + private final FSNamesystem source; - FSImageSaver(StorageDirectory sd, List errorSDs, long txid) { + FSImageSaver(FSNamesystem source, StorageDirectory sd, + List errorSDs, long txid) { + this.source = source; this.sd = sd; this.errorSDs = errorSDs; this.txid = txid; @@ -766,7 +756,7 @@ private class FSImageSaver implements Runnable { public void run() { try { - saveFSImage(sd, txid); + saveFSImage(source, sd, txid); } catch (Throwable t) { LOG.error("Unable to save image for " + sd.getRoot(), t); errorSDs.add(sd); @@ -795,7 +785,7 @@ private void waitForThreads(List threads) { * Save the contents of the FS image to a new image file in each of the * current storage directories. */ - void saveNamespace() throws IOException { + void saveNamespace(FSNamesystem source) throws IOException { assert editLog != null : "editLog must be initialized"; storage.attemptRestoreRemovedStorage(); @@ -806,7 +796,7 @@ void saveNamespace() throws IOException { } long imageTxId = editLog.getLastWrittenTxId(); try { - saveFSImageInAllDirs(imageTxId); + saveFSImageInAllDirs(source, imageTxId); storage.writeAll(); } finally { if (editLogWasOpen) { @@ -818,7 +808,8 @@ void saveNamespace() throws IOException { } - protected void saveFSImageInAllDirs(long txid) throws IOException { + protected void saveFSImageInAllDirs(FSNamesystem source, long txid) + throws IOException { if (storage.getNumStorageDirs(NameNodeDirType.IMAGE) == 0) { throw new IOException("No image directories available!"); } @@ -831,7 +822,7 @@ protected void saveFSImageInAllDirs(long txid) throws IOException { for (Iterator it = storage.dirIterator(NameNodeDirType.IMAGE); it.hasNext();) { StorageDirectory sd = it.next(); - FSImageSaver saver = new FSImageSaver(sd, errorSDs, txid); + FSImageSaver saver = new FSImageSaver(source, sd, errorSDs, txid); Thread saveThread = new Thread(saver, saver.toString()); saveThreads.add(saveThread); saveThread.start(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index 453985d917..c178e048b5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -39,7 +39,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import 
org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; @@ -555,9 +555,14 @@ void save(File newFile, DigestOutputStream fos = new DigestOutputStream(fout, digester); DataOutputStream out = new DataOutputStream(fos); try { - out.writeInt(FSConstants.LAYOUT_VERSION); - out.writeInt(sourceNamesystem.getFSImage() - .getStorage().getNamespaceID()); // TODO bad dependency + out.writeInt(HdfsConstants.LAYOUT_VERSION); + // We use the non-locked version of getNamespaceInfo here since + // the coordinating thread of saveNamespace already has read-locked + // the namespace for us. If we attempt to take another readlock + // from the actual saver thread, there's a potential of a + // fairness-related deadlock. See the comments on HDFS-2223. + out.writeInt(sourceNamesystem.unprotectedGetNamespaceInfo() + .getNamespaceID()); out.writeLong(fsDir.rootDir.numItemsInTree()); out.writeLong(sourceNamesystem.getGenerationStamp()); out.writeLong(txid); @@ -568,7 +573,7 @@ void save(File newFile, " using " + compression); - byte[] byteStore = new byte[4*FSConstants.MAX_PATH_LENGTH]; + byte[] byteStore = new byte[4*HdfsConstants.MAX_PATH_LENGTH]; ByteBuffer strbuf = ByteBuffer.wrap(byteStore); // save the root FSImageSerialization.saveINode2Image(fsDir.rootDir, out); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java index cec2eeff2d..91076ef5f6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java @@ -32,6 +32,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; @@ -55,6 +56,7 @@ class FSImagePreTransactionalStorageInspector extends FSImageStorageInspector { private boolean hasOutOfDateStorageDirs = false; /* Flag set false if there are any "previous" directories found */ private boolean isUpgradeFinalized = true; + private boolean needToSaveAfterRecovery = false; // Track the name and edits dir with the latest times private long latestNameCheckpointTime = Long.MIN_VALUE; @@ -139,15 +141,15 @@ static long readCheckpointTime(StorageDirectory sd) throws IOException { boolean isUpgradeFinalized() { return isUpgradeFinalized; } - + @Override - LoadPlan createLoadPlan() throws IOException { + FSImageFile getLatestImage() throws IOException { // We should have at least one image and one edits dirs if (latestNameSD == null) throw new IOException("Image file is not found in " + imageDirs); if (latestEditsSD == null) throw new IOException("Edits file is not found in " + editsDirs); - + // Make sure we are loading image and edits from same checkpoint if (latestNameCheckpointTime > latestEditsCheckpointTime && latestNameSD != latestEditsSD @@ -168,92 +170,70 @@ LoadPlan createLoadPlan() throws IOException { "image checkpoint time = " + 
latestNameCheckpointTime + "edits checkpoint time = " + latestEditsCheckpointTime); } + + needToSaveAfterRecovery = doRecovery(); - return new PreTransactionalLoadPlan(); + return new FSImageFile(latestNameSD, + NNStorage.getStorageFile(latestNameSD, NameNodeFile.IMAGE), + HdfsConstants.INVALID_TXID); } - + @Override boolean needToSave() { return hasOutOfDateStorageDirs || checkpointTimes.size() != 1 || - latestNameCheckpointTime > latestEditsCheckpointTime; - + latestNameCheckpointTime > latestEditsCheckpointTime || + needToSaveAfterRecovery; } - private class PreTransactionalLoadPlan extends LoadPlan { - - @Override - boolean doRecovery() throws IOException { - LOG.debug( + boolean doRecovery() throws IOException { + LOG.debug( "Performing recovery in "+ latestNameSD + " and " + latestEditsSD); - boolean needToSave = false; - File curFile = - NNStorage.getStorageFile(latestNameSD, NameNodeFile.IMAGE); - File ckptFile = - NNStorage.getStorageFile(latestNameSD, NameNodeFile.IMAGE_NEW); - - // - // If we were in the midst of a checkpoint - // - if (ckptFile.exists()) { - needToSave = true; - if (NNStorage.getStorageFile(latestEditsSD, NameNodeFile.EDITS_NEW) - .exists()) { - // - // checkpointing migth have uploaded a new - // merged image, but we discard it here because we are - // not sure whether the entire merged image was uploaded - // before the namenode crashed. - // - if (!ckptFile.delete()) { - throw new IOException("Unable to delete " + ckptFile); - } - } else { - // - // checkpointing was in progress when the namenode - // shutdown. The fsimage.ckpt was created and the edits.new - // file was moved to edits. We complete that checkpoint by - // moving fsimage.new to fsimage. There is no need to - // update the fstime file here. renameTo fails on Windows - // if the destination file already exists. - // + boolean needToSave = false; + File curFile = + NNStorage.getStorageFile(latestNameSD, NameNodeFile.IMAGE); + File ckptFile = + NNStorage.getStorageFile(latestNameSD, NameNodeFile.IMAGE_NEW); + + // + // If we were in the midst of a checkpoint + // + if (ckptFile.exists()) { + needToSave = true; + if (NNStorage.getStorageFile(latestEditsSD, NameNodeFile.EDITS_NEW) + .exists()) { + // + // checkpointing migth have uploaded a new + // merged image, but we discard it here because we are + // not sure whether the entire merged image was uploaded + // before the namenode crashed. + // + if (!ckptFile.delete()) { + throw new IOException("Unable to delete " + ckptFile); + } + } else { + // + // checkpointing was in progress when the namenode + // shutdown. The fsimage.ckpt was created and the edits.new + // file was moved to edits. We complete that checkpoint by + // moving fsimage.new to fsimage. There is no need to + // update the fstime file here. renameTo fails on Windows + // if the destination file already exists. 
+ // + if (!ckptFile.renameTo(curFile)) { + if (!curFile.delete()) + LOG.warn("Unable to delete dir " + curFile + " before rename"); if (!ckptFile.renameTo(curFile)) { - if (!curFile.delete()) - LOG.warn("Unable to delete dir " + curFile + " before rename"); - if (!ckptFile.renameTo(curFile)) { - throw new IOException("Unable to rename " + ckptFile + - " to " + curFile); - } + throw new IOException("Unable to rename " + ckptFile + + " to " + curFile); } } } - return needToSave; } - - @Override - File getImageFile() { - return NNStorage.getStorageFile(latestNameSD, NameNodeFile.IMAGE); - } - - @Override - List getEditsFiles() { - if (latestNameCheckpointTime > latestEditsCheckpointTime) { - // the image is already current, discard edits - LOG.debug( - "Name checkpoint time is newer than edits, not loading edits."); - return Collections.emptyList(); - } - - return getEditsInStorageDir(latestEditsSD); - } - - @Override - StorageDirectory getStorageDirectoryForProperties() { - return latestNameSD; - } + return needToSave; } - + /** * @return a list with the paths to EDITS and EDITS_NEW (if it exists) * in a given storage directory. @@ -269,4 +249,33 @@ static List getEditsInStorageDir(StorageDirectory sd) { } return files; } + + private List getLatestEditsFiles() { + if (latestNameCheckpointTime > latestEditsCheckpointTime) { + // the image is already current, discard edits + LOG.debug( + "Name checkpoint time is newer than edits, not loading edits."); + return Collections.emptyList(); + } + + return getEditsInStorageDir(latestEditsSD); + } + + @Override + long getMaxSeenTxId() { + return 0L; + } + + static Iterable getEditLogStreams(NNStorage storage) + throws IOException { + FSImagePreTransactionalStorageInspector inspector + = new FSImagePreTransactionalStorageInspector(); + storage.inspectStorageDirs(inspector); + + List editStreams = new ArrayList(); + for (File f : inspector.getLatestEditsFiles()) { + editStreams.add(new EditLogFileInputStream(f)); + } + return editStreams; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java index 277fac0eb9..3ed8513636 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Writable; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageStorageInspector.java index 65bfa0ac55..a7c2949f29 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageStorageInspector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageStorageInspector.java @@ -21,6 +21,7 @@ import java.io.IOException; 
import java.util.List; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; @@ -43,60 +44,22 @@ abstract class FSImageStorageInspector { abstract boolean isUpgradeFinalized(); /** - * Create a plan to load the image from the set of inspected storage directories. + * Get the image files which should be loaded into the filesystem. * @throws IOException if not enough files are available (eg no image found in any directory) */ - abstract LoadPlan createLoadPlan() throws IOException; - + abstract FSImageFile getLatestImage() throws IOException; + + /** + * Get the minimum tx id which should be loaded with this set of images. + */ + abstract long getMaxSeenTxId(); + /** * @return true if the directories are in such a state that the image should be re-saved * following the load */ abstract boolean needToSave(); - /** - * A plan to load the namespace from disk, providing the locations from which to load - * the image and a set of edits files. - */ - abstract static class LoadPlan { - /** - * Execute atomic move sequence in the chosen storage directories, - * in order to recover from an interrupted checkpoint. - * @return true if some recovery action was taken - */ - abstract boolean doRecovery() throws IOException; - - /** - * @return the file from which to load the image data - */ - abstract File getImageFile(); - - /** - * @return a list of flies containing edits to replay - */ - abstract List getEditsFiles(); - - /** - * @return the storage directory containing the VERSION file that should be - * loaded. - */ - abstract StorageDirectory getStorageDirectoryForProperties(); - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("Will load image file: ").append(getImageFile()).append("\n"); - sb.append("Will load edits files:").append("\n"); - for (File f : getEditsFiles()) { - sb.append(" ").append(f).append("\n"); - } - sb.append("Will load metadata from: ") - .append(getStorageDirectoryForProperties()) - .append("\n"); - return sb.toString(); - } - } - /** * Record of an image that has been located and had its filename parsed. 
*/ @@ -106,7 +69,8 @@ static class FSImageFile { private final File file; FSImageFile(StorageDirectory sd, File file, long txId) { - assert txId >= 0 : "Invalid txid on " + file +": " + txId; + assert txId >= 0 || txId == HdfsConstants.INVALID_TXID + : "Invalid txid on " + file +": " + txId; this.sd = sd; this.txId = txId; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java index 0814a140b5..33d6e90f92 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java @@ -35,11 +35,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; -import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; @@ -55,9 +54,7 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector { private boolean isUpgradeFinalized = true; List foundImages = new ArrayList(); - List foundEditLogs = new ArrayList(); - SortedMap logGroups = new TreeMap(); - long maxSeenTxId = 0; + private long maxSeenTxId = 0; private static final Pattern IMAGE_REGEX = Pattern.compile( NameNodeFile.IMAGE.getName() + "_(\\d+)"); @@ -71,6 +68,8 @@ public void inspectDirectory(StorageDirectory sd) throws IOException { return; } + maxSeenTxId = Math.max(maxSeenTxId, NNStorage.readTransactionIdFile(sd)); + File currentDir = sd.getCurrentDir(); File filesInStorage[]; try { @@ -113,34 +112,10 @@ public void inspectDirectory(StorageDirectory sd) throws IOException { LOG.warn("Unable to determine the max transaction ID seen by " + sd, ioe); } - List editLogs - = FileJournalManager.matchEditLogs(filesInStorage); - if (sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) { - for (EditLogFile log : editLogs) { - addEditLog(log); - } - } else if (!editLogs.isEmpty()){ - LOG.warn("Found the following edit log file(s) in " + sd + - " even though it was not configured to store edits:\n" + - " " + Joiner.on("\n ").join(editLogs)); - - } - // set finalized flag isUpgradeFinalized = isUpgradeFinalized && !sd.getPreviousDir().exists(); } - private void addEditLog(EditLogFile foundEditLog) { - foundEditLogs.add(foundEditLog); - LogGroup group = logGroups.get(foundEditLog.getFirstTxId()); - if (group == null) { - group = new LogGroup(foundEditLog.getFirstTxId()); - logGroups.put(foundEditLog.getFirstTxId(), group); - } - group.add(foundEditLog); - } - - @Override public boolean isUpgradeFinalized() { return isUpgradeFinalized; @@ -151,9 +126,13 @@ public boolean isUpgradeFinalized() { * If there are multiple storage directories which contain equal images * the storage directory that was inspected first will be preferred. * - * Returns null if no images were found. 
+ * @throws FileNotFoundException if not images are found. */ - FSImageFile getLatestImage() { + FSImageFile getLatestImage() throws IOException { + if (foundImages.isEmpty()) { + throw new FileNotFoundException("No valid image files found"); + } + FSImageFile ret = null; for (FSImageFile img : foundImages) { if (ret == null || img.txId > ret.txId) { @@ -167,349 +146,13 @@ public List getFoundImages() { return ImmutableList.copyOf(foundImages); } - public List getEditLogFiles() { - return ImmutableList.copyOf(foundEditLogs); - } - - @Override - public LoadPlan createLoadPlan() throws IOException { - if (foundImages.isEmpty()) { - throw new FileNotFoundException("No valid image files found"); - } - - FSImageFile recoveryImage = getLatestImage(); - LogLoadPlan logPlan = createLogLoadPlan(recoveryImage.txId, Long.MAX_VALUE); - - return new TransactionalLoadPlan(recoveryImage, - logPlan); - } - - /** - * Plan which logs to load in order to bring the namespace up-to-date. - * Transactions will be considered in the range (sinceTxId, maxTxId] - * - * @param sinceTxId the highest txid that is already loaded - * (eg from the image checkpoint) - * @param maxStartTxId ignore any log files that start after this txid - */ - LogLoadPlan createLogLoadPlan(long sinceTxId, long maxStartTxId) throws IOException { - long expectedTxId = sinceTxId + 1; - - List recoveryLogs = new ArrayList(); - - SortedMap tailGroups = logGroups.tailMap(expectedTxId); - if (logGroups.size() > tailGroups.size()) { - LOG.debug("Excluded " + (logGroups.size() - tailGroups.size()) + - " groups of logs because they start with a txid less than image " + - "txid " + sinceTxId); - } - - SortedMap usefulGroups; - if (maxStartTxId > sinceTxId) { - usefulGroups = tailGroups.headMap(maxStartTxId); - } else { - usefulGroups = new TreeMap(); - } - - if (usefulGroups.size() > tailGroups.size()) { - LOG.debug("Excluded " + (tailGroups.size() - usefulGroups.size()) + - " groups of logs because they start with a txid higher than max " + - "txid " + sinceTxId); - } - - - for (Map.Entry entry : usefulGroups.entrySet()) { - long logStartTxId = entry.getKey(); - LogGroup logGroup = entry.getValue(); - - logGroup.planRecovery(); - - if (expectedTxId != FSConstants.INVALID_TXID && logStartTxId != expectedTxId) { - throw new IOException("Expected next log group would start at txid " + - expectedTxId + " but starts at txid " + logStartTxId); - } - - // We can pick any of the non-corrupt logs here - recoveryLogs.add(logGroup.getBestNonCorruptLog()); - - // If this log group was finalized, we know to expect the next - // log group to start at the following txid (ie no gaps) - if (logGroup.hasKnownLastTxId()) { - expectedTxId = logGroup.getLastTxId() + 1; - } else { - // the log group was in-progress so we don't know what ID - // the next group should start from. - expectedTxId = FSConstants.INVALID_TXID; - } - } - - long lastLogGroupStartTxId = usefulGroups.isEmpty() ? 
- 0 : usefulGroups.lastKey(); - if (maxSeenTxId > sinceTxId && - maxSeenTxId > lastLogGroupStartTxId) { - String msg = "At least one storage directory indicated it has seen a " + - "log segment starting at txid " + maxSeenTxId; - if (usefulGroups.isEmpty()) { - msg += " but there are no logs to load."; - } else { - msg += " but the most recent log file found starts with txid " + - lastLogGroupStartTxId; - } - throw new IOException(msg); - } - - return new LogLoadPlan(recoveryLogs, - Lists.newArrayList(usefulGroups.values())); - - } - @Override public boolean needToSave() { return needToSave; } - - /** - * A group of logs that all start at the same txid. - * - * Handles determining which logs are corrupt and which should be considered - * candidates for loading. - */ - static class LogGroup { - long startTxId; - List logs = new ArrayList();; - private Set endTxIds = new TreeSet(); - private boolean hasInProgress = false; - private boolean hasFinalized = false; - - LogGroup(long startTxId) { - this.startTxId = startTxId; - } - - EditLogFile getBestNonCorruptLog() { - // First look for non-corrupt finalized logs - for (EditLogFile log : logs) { - if (!log.isCorrupt() && !log.isInProgress()) { - return log; - } - } - // Then look for non-corrupt in-progress logs - for (EditLogFile log : logs) { - if (!log.isCorrupt()) { - return log; - } - } - // We should never get here, because we don't get to the planning stage - // without calling planRecovery first, and if we've called planRecovery, - // we would have already thrown if there were no non-corrupt logs! - throw new IllegalStateException( - "No non-corrupt logs for txid " + startTxId); - } - - /** - * @return true if we can determine the last txid in this log group. - */ - boolean hasKnownLastTxId() { - for (EditLogFile log : logs) { - if (!log.isInProgress()) { - return true; - } - } - return false; - } - - /** - * @return the last txid included in the logs in this group - * @throws IllegalStateException if it is unknown - - * {@see #hasKnownLastTxId()} - */ - long getLastTxId() { - for (EditLogFile log : logs) { - if (!log.isInProgress()) { - return log.getLastTxId(); - } - } - throw new IllegalStateException("LogGroup only has in-progress logs"); - } - - - void add(EditLogFile log) { - assert log.getFirstTxId() == startTxId; - logs.add(log); - - if (log.isInProgress()) { - hasInProgress = true; - } else { - hasFinalized = true; - endTxIds.add(log.getLastTxId()); - } - } - - void planRecovery() throws IOException { - assert hasInProgress || hasFinalized; - - checkConsistentEndTxIds(); - - if (hasFinalized && hasInProgress) { - planMixedLogRecovery(); - } else if (!hasFinalized && hasInProgress) { - planAllInProgressRecovery(); - } else if (hasFinalized && !hasInProgress) { - LOG.debug("No recovery necessary for logs starting at txid " + - startTxId); - } - } - - /** - * Recovery case for when some logs in the group were in-progress, and - * others were finalized. This happens when one of the storage - * directories fails. - * - * The in-progress logs in this case should be considered corrupt. - */ - private void planMixedLogRecovery() throws IOException { - for (EditLogFile log : logs) { - if (log.isInProgress()) { - LOG.warn("Log at " + log.getFile() + " is in progress, but " + - "other logs starting at the same txid " + startTxId + - " are finalized. Moving aside."); - log.markCorrupt(); - } - } - } - - /** - * Recovery case for when all of the logs in the group were in progress. - * This happens if the NN completely crashes and restarts. 
In this case - * we check the non-zero lengths of each log file, and any logs that are - * less than the max of these lengths are considered corrupt. - */ - private void planAllInProgressRecovery() throws IOException { - // We only have in-progress logs. We need to figure out which logs have - // the latest data to reccover them - LOG.warn("Logs beginning at txid " + startTxId + " were are all " + - "in-progress (probably truncated due to a previous NameNode " + - "crash)"); - if (logs.size() == 1) { - // Only one log, it's our only choice! - EditLogFile log = logs.get(0); - if (log.validateLog().numTransactions == 0) { - // If it has no transactions, we should consider it corrupt just - // to be conservative. - // See comment below for similar case - LOG.warn("Marking log at " + log.getFile() + " as corrupt since " + - "it has no transactions in it."); - log.markCorrupt(); - } - return; - } - - long maxValidTxnCount = Long.MIN_VALUE; - for (EditLogFile log : logs) { - long validTxnCount = log.validateLog().numTransactions; - LOG.warn(" Log " + log.getFile() + - " valid txns=" + validTxnCount + - " valid len=" + log.validateLog().validLength); - maxValidTxnCount = Math.max(maxValidTxnCount, validTxnCount); - } - - for (EditLogFile log : logs) { - long txns = log.validateLog().numTransactions; - if (txns < maxValidTxnCount) { - LOG.warn("Marking log at " + log.getFile() + " as corrupt since " + - "it is has only " + txns + " valid txns whereas another " + - "log has " + maxValidTxnCount); - log.markCorrupt(); - } else if (txns == 0) { - // this can happen if the NN crashes right after rolling a log - // but before the START_LOG_SEGMENT txn is written. Since the log - // is empty, we can just move it aside to its corrupt name. - LOG.warn("Marking log at " + log.getFile() + " as corrupt since " + - "it has no transactions in it."); - log.markCorrupt(); - } - } - } - - /** - * Check for the case when we have multiple finalized logs and they have - * different ending transaction IDs. This violates an invariant that all - * log directories should roll together. We should abort in this case. - */ - private void checkConsistentEndTxIds() throws IOException { - if (hasFinalized && endTxIds.size() > 1) { - throw new IOException("More than one ending txid was found " + - "for logs starting at txid " + startTxId + ". 
" + - "Found: " + StringUtils.join(endTxIds, ',')); - } - } - - void recover() throws IOException { - for (EditLogFile log : logs) { - if (log.isCorrupt()) { - log.moveAsideCorruptFile(); - } else if (log.isInProgress()) { - log.finalizeLog(); - } - } - } - } - - static class TransactionalLoadPlan extends LoadPlan { - final FSImageFile image; - final LogLoadPlan logPlan; - - public TransactionalLoadPlan(FSImageFile image, - LogLoadPlan logPlan) { - super(); - this.image = image; - this.logPlan = logPlan; - } - - @Override - boolean doRecovery() throws IOException { - logPlan.doRecovery(); - return false; - } - - @Override - File getImageFile() { - return image.getFile(); - } - - @Override - List getEditsFiles() { - return logPlan.getEditsFiles(); - } - - @Override - StorageDirectory getStorageDirectoryForProperties() { - return image.sd; - } - } - - static class LogLoadPlan { - final List editLogs; - final List logGroupsToRecover; - - LogLoadPlan(List editLogs, - List logGroupsToRecover) { - this.editLogs = editLogs; - this.logGroupsToRecover = logGroupsToRecover; - } - - public void doRecovery() throws IOException { - for (LogGroup g : logGroupsToRecover) { - g.recover(); - } - } - - public List getEditsFiles() { - List ret = new ArrayList(); - for (EditLogFile log : editLogs) { - ret.add(log.getFile()); - } - return ret; - } + @Override + long getMaxSeenTxId() { + return maxSeenTxId; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index ceb557b4e6..116fa4826a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -78,10 +78,10 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; -import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction; -import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; @@ -99,9 +99,9 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics; import org.apache.hadoop.hdfs.server.common.GenerationStamp; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; import 
org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; import org.apache.hadoop.hdfs.server.common.Util; @@ -134,6 +134,8 @@ import org.apache.hadoop.util.VersionInfo; import org.mortbay.util.ajax.JSON; +import com.google.common.base.Preconditions; + /*************************************************** * FSNamesystem does the actual bookkeeping work for the * DataNode. @@ -258,12 +260,43 @@ private static final void logAuditEvent(UserGroupInformation ugi, // lock to protect FSNamesystem. private ReentrantReadWriteLock fsLock; + /** - * FSNamesystem constructor. + * Instantiates an FSNamesystem loaded from the image and edits + * directories specified in the passed Configuration. + * + * @param conf the Configuration which specifies the storage directories + * from which to load + * @return an FSNamesystem which contains the loaded namespace + * @throws IOException if loading fails */ - FSNamesystem(Configuration conf) throws IOException { + public static FSNamesystem loadFromDisk(Configuration conf) throws IOException { + FSImage fsImage = new FSImage(conf); + FSNamesystem namesystem = new FSNamesystem(conf, fsImage); + + long loadStart = now(); + StartupOption startOpt = NameNode.getStartupOption(conf); + namesystem.loadFSImage(startOpt, fsImage); + long timeTakenToLoadFSImage = now() - loadStart; + LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs"); + NameNode.getNameNodeMetrics().setFsImageLoadTime( + (int) timeTakenToLoadFSImage); + return namesystem; + } + + /** + * Create an FSNamesystem associated with the specified image. + * + * Note that this does not load any data off of disk -- if you would + * like that behavior, use {@link #loadFromDisk(Configuration)} + + * @param fnImage The FSImage to associate with + * @param conf configuration + * @throws IOException on bad configuration + */ + FSNamesystem(Configuration conf, FSImage fsImage) throws IOException { try { - initialize(conf, null); + initialize(conf, fsImage); } catch(IOException e) { LOG.error(getClass().getSimpleName() + " initialization failed.", e); close(); @@ -279,29 +312,41 @@ private void initialize(Configuration conf, FSImage fsImage) resourceRecheckInterval = conf.getLong( DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY, DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT); - nnResourceChecker = new NameNodeResourceChecker(conf); - checkAvailableResources(); this.systemStart = now(); this.blockManager = new BlockManager(this, conf); this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics(); this.fsLock = new ReentrantReadWriteLock(true); // fair locking setConfigurationParameters(conf); dtSecretManager = createDelegationTokenSecretManager(conf); - this.registerMBean(); // register the MBean for the FSNamesystemState - if(fsImage == null) { - this.dir = new FSDirectory(this, conf); - StartupOption startOpt = NameNode.getStartupOption(conf); - this.dir.loadFSImage(startOpt); - long timeTakenToLoadFSImage = now() - systemStart; - LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs"); - NameNode.getNameNodeMetrics().setFsImageLoadTime( - (int) timeTakenToLoadFSImage); - } else { - this.dir = new FSDirectory(fsImage, this, conf); - } + this.dir = new FSDirectory(fsImage, this, conf); this.safeMode = new SafeModeInfo(conf); } + void loadFSImage(StartupOption startOpt, FSImage fsImage) + throws IOException { + // format before starting up if requested + if (startOpt == StartupOption.FORMAT) { + + fsImage.format(this, 
fsImage.getStorage().determineClusterId());// reuse current id + + startOpt = StartupOption.REGULAR; + } + boolean success = false; + try { + if (fsImage.recoverTransitionRead(startOpt, this)) { + fsImage.saveNamespace(this); + } + fsImage.openEditLog(); + + success = true; + } finally { + if (!success) { + fsImage.close(); + } + } + dir.imageLoadComplete(); + } + void activateSecretManager() throws IOException { if (dtSecretManager != null) { dtSecretManager.startThreads(); @@ -312,8 +357,13 @@ void activateSecretManager() throws IOException { * Activate FSNamesystem daemons. */ void activate(Configuration conf) throws IOException { + this.registerMBean(); // register the MBean for the FSNamesystemState + writeLock(); try { + nnResourceChecker = new NameNodeResourceChecker(conf); + checkAvailableResources(); + setBlockTotal(); blockManager.activate(conf); @@ -396,36 +446,6 @@ public boolean hasReadOrWriteLock() { return hasReadLock() || hasWriteLock(); } - /** - * dirs is a list of directories where the filesystem directory state - * is stored - */ - FSNamesystem(FSImage fsImage, Configuration conf) throws IOException { - this.fsLock = new ReentrantReadWriteLock(true); - this.blockManager = new BlockManager(this, conf); - setConfigurationParameters(conf); - this.dir = new FSDirectory(fsImage, this, conf); - dtSecretManager = createDelegationTokenSecretManager(conf); - } - - /** - * Create FSNamesystem for {@link BackupNode}. - * Should do everything that would be done for the NameNode, - * except for loading the image. - * - * @param bnImage {@link BackupImage} - * @param conf configuration - * @throws IOException - */ - FSNamesystem(Configuration conf, BackupImage bnImage) throws IOException { - try { - initialize(conf, bnImage); - } catch(IOException e) { - LOG.error(getClass().getSimpleName() + " initialization failed.", e); - close(); - throw e; - } - } /** * Initializes some of the members from configuration @@ -475,15 +495,22 @@ protected PermissionStatus getUpgradePermission() { NamespaceInfo getNamespaceInfo() { readLock(); try { - return new NamespaceInfo(dir.fsImage.getStorage().getNamespaceID(), - getClusterId(), getBlockPoolId(), - dir.fsImage.getStorage().getCTime(), - upgradeManager.getUpgradeVersion()); + return unprotectedGetNamespaceInfo(); } finally { readUnlock(); } } + /** + * Version of {@see #getNamespaceInfo()} that is not protected by a lock. + */ + NamespaceInfo unprotectedGetNamespaceInfo() { + return new NamespaceInfo(dir.fsImage.getStorage().getNamespaceID(), + getClusterId(), getBlockPoolId(), + dir.fsImage.getStorage().getCTime(), + upgradeManager.getUpgradeVersion()); + } + /** * Close down this file system manager. 
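Taken together, the constructor split above means new FSNamesystem(conf, fsImage) never touches disk, while loadFromDisk is the one entry point that formats on request, replays the image and edits, and opens the edit log. A rough usage sketch, assuming a Configuration already carrying the name and edits directory settings (variable names are illustrative):

// Normal NameNode-style startup: build the namesystem from what is on disk.
Configuration conf = new HdfsConfiguration();
FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);   // creates the FSImage, loads image + edits

// Image-only wiring: associate a namesystem with an FSImage without loading
// anything. Callers that manage the image themselves (presumably the
// backup-node paths whose old constructors were removed above) use this form.
FSImage image = new FSImage(conf);
FSNamesystem detached = new FSNamesystem(conf, image);

The unprotectedGetNamespaceInfo() variant added alongside this exists for code that already holds the FSNamesystem read lock (the FSImageFormat.Saver change above cites HDFS-2223), so the locked getNamespaceInfo() remains the default for everyone else.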
* Causes heartbeat and lease daemons to stop; waits briefly for @@ -2537,6 +2564,8 @@ private boolean nameNodeHasResourcesAvailable() { * @throws IOException */ private void checkAvailableResources() throws IOException { + Preconditions.checkState(nnResourceChecker != null, + "nnResourceChecker not initialized"); hasResourcesAvailable = nnResourceChecker.hasAvailableDiskSpace(); } @@ -2697,7 +2726,7 @@ void saveNamespace() throws AccessControlException, IOException { throw new IOException("Safe mode should be turned ON " + "in order to create namespace image."); } - getFSImage().saveNamespace(); + getFSImage().saveNamespace(this); LOG.info("New namespace image has been created."); } finally { readUnlock(); @@ -2756,7 +2785,7 @@ void finalizeUpgrade() throws IOException { * not tracked because the name node is not intended to leave safe mode * automatically in the case. * - * @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction) + * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction) * @see SafeModeMonitor */ class SafeModeInfo { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java index b7587c0dd1..d8bd502597 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper; @@ -120,7 +120,7 @@ public void doGet(HttpServletRequest request, HttpServletResponse response new HdfsConfiguration(datanode.getConf()); final int socketTimeout = conf.getInt( DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, - HdfsConstants.READ_TIMEOUT); + HdfsServerConstants.READ_TIMEOUT); final SocketFactory socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java index 991d7f5c5d..6e4c17161a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java @@ -23,11 +23,14 @@ import java.io.File; import java.io.IOException; import java.util.List; +import java.util.HashMap; import java.util.Comparator; +import java.util.Collections; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger; import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation; @@ -57,6 +60,9 @@ class FileJournalManager implements 
JournalManager { private static final Pattern EDITS_INPROGRESS_REGEX = Pattern.compile( NameNodeFile.EDITS_INPROGRESS.getName() + "_(\\d+)"); + private File currentInProgress = null; + private long maxSeenTransaction = 0L; + @VisibleForTesting StoragePurger purger = new NNStorageRetentionManager.DeletionStoragePurger(); @@ -66,19 +72,20 @@ public FileJournalManager(StorageDirectory sd) { } @Override - public EditLogOutputStream startLogSegment(long txid) throws IOException { - File newInProgress = NNStorage.getInProgressEditsFile(sd, txid); - EditLogOutputStream stm = new EditLogFileOutputStream(newInProgress, + synchronized public EditLogOutputStream startLogSegment(long txid) + throws IOException { + currentInProgress = NNStorage.getInProgressEditsFile(sd, txid); + EditLogOutputStream stm = new EditLogFileOutputStream(currentInProgress, outputBufferCapacity); stm.create(); return stm; } @Override - public void finalizeLogSegment(long firstTxId, long lastTxId) + synchronized public void finalizeLogSegment(long firstTxId, long lastTxId) throws IOException { - File inprogressFile = NNStorage.getInProgressEditsFile( - sd, firstTxId); + File inprogressFile = NNStorage.getInProgressEditsFile(sd, firstTxId); + File dstFile = NNStorage.getFinalizedEditsFile( sd, firstTxId, lastTxId); LOG.debug("Finalizing edits file " + inprogressFile + " -> " + dstFile); @@ -89,6 +96,9 @@ public void finalizeLogSegment(long firstTxId, long lastTxId) if (!inprogressFile.renameTo(dstFile)) { throw new IOException("Unable to finalize edits file " + inprogressFile); } + if (inprogressFile.equals(currentInProgress)) { + currentInProgress = null; + } } @VisibleForTesting @@ -97,12 +107,7 @@ public StorageDirectory getStorageDirectory() { } @Override - public String toString() { - return "FileJournalManager for storage directory " + sd; - } - - @Override - public void setOutputBufferCapacity(int size) { + synchronized public void setOutputBufferCapacity(int size) { this.outputBufferCapacity = size; } @@ -120,13 +125,6 @@ public void purgeLogsOlderThan(long minTxIdToKeep) } } - @Override - public EditLogInputStream getInProgressInputStream(long segmentStartsAtTxId) - throws IOException { - File f = NNStorage.getInProgressEditsFile(sd, segmentStartsAtTxId); - return new EditLogFileInputStream(f); - } - /** * Find all editlog segments starting at or above the given txid. 
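The FileJournalManager changes above make the segment lifecycle explicitly stateful: startLogSegment records the in-progress file and finalizeLogSegment clears it once the rename succeeds, with both methods synchronized so the bookkeeping cannot race. A stripped-down sketch of that lifecycle, with paths and stream wiring as in the patch and logging trimmed:

private File currentInProgress = null;

synchronized public EditLogOutputStream startLogSegment(long txid) throws IOException {
  // Remember which file is open so recoverUnfinalizedSegments() can skip it later.
  currentInProgress = NNStorage.getInProgressEditsFile(sd, txid);
  EditLogOutputStream stm =
      new EditLogFileOutputStream(currentInProgress, outputBufferCapacity);
  stm.create();
  return stm;
}

synchronized public void finalizeLogSegment(long firstTxId, long lastTxId) throws IOException {
  File inprogressFile = NNStorage.getInProgressEditsFile(sd, firstTxId);
  File dstFile = NNStorage.getFinalizedEditsFile(sd, firstTxId, lastTxId);
  if (!inprogressFile.renameTo(dstFile)) {
    throw new IOException("Unable to finalize edits file " + inprogressFile);
  }
  if (inprogressFile.equals(currentInProgress)) {
    currentInProgress = null;   // the open segment is now finalized
  }
}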
* @param fromTxId the txnid which to start looking @@ -178,17 +176,156 @@ static List matchEditLogs(File[] filesInStorage) { try { long startTxId = Long.valueOf(inProgressEditsMatch.group(1)); ret.add( - new EditLogFile(f, startTxId, EditLogFile.UNKNOWN_END)); + new EditLogFile(f, startTxId, startTxId, true)); } catch (NumberFormatException nfe) { LOG.error("In-progress edits file " + f + " has improperly " + "formatted transaction ID"); // skip - } + } } } return ret; } + @Override + synchronized public EditLogInputStream getInputStream(long fromTxId) + throws IOException { + for (EditLogFile elf : getLogFiles(fromTxId)) { + if (elf.getFirstTxId() == fromTxId) { + if (elf.isInProgress()) { + elf.validateLog(); + } + if (LOG.isTraceEnabled()) { + LOG.trace("Returning edit stream reading from " + elf); + } + return new EditLogFileInputStream(elf.getFile(), + elf.getFirstTxId(), elf.getLastTxId()); + } + } + + throw new IOException("Cannot find editlog file with " + fromTxId + + " as first first txid"); + } + + @Override + public long getNumberOfTransactions(long fromTxId) + throws IOException, CorruptionException { + long numTxns = 0L; + + for (EditLogFile elf : getLogFiles(fromTxId)) { + if (LOG.isTraceEnabled()) { + LOG.trace("Counting " + elf); + } + if (elf.getFirstTxId() > fromTxId) { // there must be a gap + LOG.warn("Gap in transactions in " + sd.getRoot() + ". Gap is " + + fromTxId + " - " + (elf.getFirstTxId() - 1)); + break; + } else if (fromTxId == elf.getFirstTxId()) { + if (elf.isInProgress()) { + elf.validateLog(); + } + + if (elf.isCorrupt()) { + break; + } + fromTxId = elf.getLastTxId() + 1; + numTxns += fromTxId - elf.getFirstTxId(); + + if (elf.isInProgress()) { + break; + } + } // else skip + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Journal " + this + " has " + numTxns + + " txns from " + fromTxId); + } + + long max = findMaxTransaction(); + // fromTxId should be greater than max, as it points to the next + // transaction we should expect to find. 
If it is less than or equal + // to max, it means that a transaction with txid == max has not been found + if (numTxns == 0 && fromTxId <= max) { + String error = String.format("Gap in transactions, max txnid is %d" + + ", 0 txns from %d", max, fromTxId); + LOG.error(error); + throw new CorruptionException(error); + } + + return numTxns; + } + + @Override + synchronized public void recoverUnfinalizedSegments() throws IOException { + File currentDir = sd.getCurrentDir(); + List allLogFiles = matchEditLogs(currentDir.listFiles()); + + // make sure journal is aware of max seen transaction before moving corrupt + // files aside + findMaxTransaction(); + + for (EditLogFile elf : allLogFiles) { + if (elf.getFile().equals(currentInProgress)) { + continue; + } + if (elf.isInProgress()) { + elf.validateLog(); + + if (elf.isCorrupt()) { + elf.moveAsideCorruptFile(); + continue; + } + finalizeLogSegment(elf.getFirstTxId(), elf.getLastTxId()); + } + } + } + + private List getLogFiles(long fromTxId) throws IOException { + File currentDir = sd.getCurrentDir(); + List allLogFiles = matchEditLogs(currentDir.listFiles()); + List logFiles = Lists.newArrayList(); + + for (EditLogFile elf : allLogFiles) { + if (fromTxId > elf.getFirstTxId() + && fromTxId <= elf.getLastTxId()) { + throw new IOException("Asked for fromTxId " + fromTxId + + " which is in middle of file " + elf.file); + } + if (fromTxId <= elf.getFirstTxId()) { + logFiles.add(elf); + } + } + + Collections.sort(logFiles, EditLogFile.COMPARE_BY_START_TXID); + + return logFiles; + } + + /** + * Find the maximum transaction in the journal. + * This gets stored in a member variable, as corrupt edit logs + * will be moved aside, but we still need to remember their first + * tranaction id in the case that it was the maximum transaction in + * the journal. + */ + private long findMaxTransaction() + throws IOException { + for (EditLogFile elf : getLogFiles(0)) { + if (elf.isInProgress()) { + maxSeenTransaction = Math.max(elf.getFirstTxId(), maxSeenTransaction); + elf.validateLog(); + } + maxSeenTransaction = Math.max(elf.getLastTxId(), maxSeenTransaction); + } + return maxSeenTransaction; + } + + @Override + public String toString() { + return String.format("FileJournalManager(root=%s)", sd.getRoot()); + } + /** * Record of an edit log that has been located and had its filename parsed. 
*/ @@ -196,12 +333,10 @@ static class EditLogFile { private File file; private final long firstTxId; private long lastTxId; - - private EditLogValidation cachedValidation = null; + private boolean isCorrupt = false; - - static final long UNKNOWN_END = -1; - + private final boolean isInProgress; + final static Comparator COMPARE_BY_START_TXID = new Comparator() { public int compare(EditLogFile a, EditLogFile b) { @@ -214,30 +349,24 @@ public int compare(EditLogFile a, EditLogFile b) { EditLogFile(File file, long firstTxId, long lastTxId) { - assert lastTxId == UNKNOWN_END || lastTxId >= firstTxId; - assert firstTxId > 0; + this(file, firstTxId, lastTxId, false); + assert (lastTxId != HdfsConstants.INVALID_TXID) + && (lastTxId >= firstTxId); + } + + EditLogFile(File file, long firstTxId, + long lastTxId, boolean isInProgress) { + assert (lastTxId == HdfsConstants.INVALID_TXID && isInProgress) + || (lastTxId != HdfsConstants.INVALID_TXID && lastTxId >= firstTxId); + assert (firstTxId > 0) || (firstTxId == HdfsConstants.INVALID_TXID); assert file != null; this.firstTxId = firstTxId; this.lastTxId = lastTxId; this.file = file; + this.isInProgress = isInProgress; } - public void finalizeLog() throws IOException { - long numTransactions = validateLog().numTransactions; - long lastTxId = firstTxId + numTransactions - 1; - File dst = new File(file.getParentFile(), - NNStorage.getFinalizedEditsFileName(firstTxId, lastTxId)); - LOG.info("Finalizing edits log " + file + " by renaming to " - + dst.getName()); - if (!file.renameTo(dst)) { - throw new IOException("Couldn't finalize log " + - file + " to " + dst); - } - this.lastTxId = lastTxId; - file = dst; - } - long getFirstTxId() { return firstTxId; } @@ -246,15 +375,22 @@ long getLastTxId() { return lastTxId; } - EditLogValidation validateLog() throws IOException { - if (cachedValidation == null) { - cachedValidation = EditLogFileInputStream.validateEditLog(file); + /** + * Count the number of valid transactions in a log. + * This will update the lastTxId of the EditLogFile or + * mark it as corrupt if it is. 
+ */ + void validateLog() throws IOException { + EditLogValidation val = EditLogFileInputStream.validateEditLog(file); + if (val.getNumTransactions() == 0) { + markCorrupt(); + } else { + this.lastTxId = val.getEndTxId(); } - return cachedValidation; } boolean isInProgress() { - return (lastTxId == UNKNOWN_END); + return isInProgress; } File getFile() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java index 3831b4580f..8476e27cdc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java @@ -29,7 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.security.UserGroupInformation; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetDelegationTokenServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetDelegationTokenServlet.java index 2c0f81abc5..4fc9dcca63 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetDelegationTokenServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetDelegationTokenServlet.java @@ -75,7 +75,7 @@ public Void run() throws Exception { + ":" + NameNode.getAddress(conf).getPort(); Token token = - nn.getDelegationToken(new Text(renewerFinal)); + nn.getRpcServer().getDelegationToken(new Text(renewerFinal)); if(token == null) { throw new Exception("couldn't get the token for " +s); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java index 7663ecff76..2440c4dd12 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; /** * I-node for file being written. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java index d62aaa7e5a..8440fe049b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java @@ -41,6 +41,25 @@ interface JournalManager { */ void finalizeLogSegment(long firstTxId, long lastTxId) throws IOException; + /** + * Get the input stream starting with fromTxnId from this journal manager + * @param fromTxnId the first transaction id we want to read + * @return the stream starting with transaction fromTxnId + * @throws IOException if a stream cannot be found. + */ + EditLogInputStream getInputStream(long fromTxnId) throws IOException; + + /** + * Get the number of transactions contiguously available from fromTxnId. + * + * @param fromTxnId Transaction id to count from + * @return The number of transactions available from fromTxnId + * @throws IOException if the journal cannot be read. + * @throws CorruptionException if there is a gap in the journal at fromTxnId. + */ + long getNumberOfTransactions(long fromTxnId) + throws IOException, CorruptionException; + /** * Set the amount of memory that this stream should use to buffer edits */ @@ -59,10 +78,21 @@ void purgeLogsOlderThan(long minTxIdToKeep) throws IOException; /** - * @return an EditLogInputStream that reads from the same log that - * the edit log is currently writing. May return null if this journal - * manager does not support this operation. - */ - EditLogInputStream getInProgressInputStream(long segmentStartsAtTxId) - throws IOException; + * Recover segments which have not been finalized. + */ + void recoverUnfinalizedSegments() throws IOException; + + /** + * Indicate that a journal cannot be used to load a certain range of + * edits. + * This exception occurs in the case of a gap in the transactions, or a + * corrupt edit file.
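Taken together, the new interface methods are intended to be driven in a fixed order at load time: recover unfinalized segments first, then count what is contiguously readable, then open a stream. A compact sketch under the assumption that it lives in this (package-private) interface's package; the helper name is invented.

package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;

class JournalReadExample {
  /** Recover unfinalized segments, then open a stream at fromTxId. */
  static EditLogInputStream recoverAndOpen(JournalManager jm, long fromTxId)
      throws IOException {
    // Corrupt in-progress segments are moved aside, valid ones finalized.
    jm.recoverUnfinalizedSegments();

    // May throw JournalManager.CorruptionException if the journal has a gap here.
    long available = jm.getNumberOfTransactions(fromTxId);
    if (available == 0) {
      throw new IOException("Nothing to read at or after txid " + fromTxId);
    }
    return jm.getInputStream(fromTxId);
  }
}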
+ */ + public static class CorruptionException extends IOException { + static final long serialVersionUID = -4687802717006172702L; + + public CorruptionException(String reason) { + super(reason); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java index 257d37e0cb..44857739b3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java @@ -32,8 +32,8 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnresolvedLinkException; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import static org.apache.hadoop.hdfs.server.common.Util.now; @@ -65,8 +65,8 @@ public class LeaseManager { private final FSNamesystem fsnamesystem; - private long softLimit = FSConstants.LEASE_SOFTLIMIT_PERIOD; - private long hardLimit = FSConstants.LEASE_HARDLIMIT_PERIOD; + private long softLimit = HdfsConstants.LEASE_SOFTLIMIT_PERIOD; + private long hardLimit = HdfsConstants.LEASE_HARDLIMIT_PERIOD; // // Used for handling lock-leases @@ -379,7 +379,7 @@ public void run() { try { - Thread.sleep(HdfsConstants.NAMENODE_LEASE_RECHECK_INTERVAL); + Thread.sleep(HdfsServerConstants.NAMENODE_LEASE_RECHECK_INTERVAL); } catch(InterruptedException ie) { if (LOG.isDebugEnabled()) { LOG.debug(name + " is interrupted", ie); @@ -409,7 +409,7 @@ private synchronized void checkLeases() { oldest.getPaths().toArray(leasePaths); for(String p : leasePaths) { try { - if(fsnamesystem.internalReleaseLease(oldest, p, HdfsConstants.NAMENODE_LEASE_HOLDER)) { + if(fsnamesystem.internalReleaseLease(oldest, p, HdfsServerConstants.NAMENODE_LEASE_HOLDER)) { LOG.info("Lease recovery for file " + p + " is complete. 
File closed."); removing.add(p); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java index 00461e2fb3..869922abb2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java @@ -42,11 +42,11 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.UpgradeManager; @@ -126,7 +126,7 @@ public boolean isOfType(StorageDirType type) { * recent fsimage file. This does not include any transactions * that have since been written to the edit log. */ - protected long mostRecentCheckpointTxId = FSConstants.INVALID_TXID; + protected long mostRecentCheckpointTxId = HdfsConstants.INVALID_TXID; /** * list of failed (and thus removed) storages @@ -501,7 +501,7 @@ private void format(StorageDirectory sd) throws IOException { * Format all available storage directories. */ public void format(String clusterId) throws IOException { - this.layoutVersion = FSConstants.LAYOUT_VERSION; + this.layoutVersion = HdfsConstants.LAYOUT_VERSION; this.namespaceID = newNamespaceID(); this.clusterID = clusterId; this.blockpoolID = newBlockPoolID(); @@ -574,7 +574,7 @@ private void setDeprecatedPropertiesForUpgrade(Properties props) { * This should only be used during upgrades. 
*/ String getDeprecatedProperty(String prop) { - assert getLayoutVersion() > FSConstants.LAYOUT_VERSION : + assert getLayoutVersion() > HdfsConstants.LAYOUT_VERSION : "getDeprecatedProperty should only be done when loading " + "storage from past versions during upgrade."; return deprecatedProperties.get(prop); @@ -764,7 +764,7 @@ void verifyDistributedUpgradeProgress(StartupOption startOpt if(upgradeManager.getDistributedUpgrades() != null) throw new IOException("\n Distributed upgrade for NameNode version " + upgradeManager.getUpgradeVersion() - + " to current LV " + FSConstants.LAYOUT_VERSION + + " to current LV " + HdfsConstants.LAYOUT_VERSION + " is required.\n Please restart NameNode" + " with -upgrade option."); } @@ -780,7 +780,7 @@ void initializeDistributedUpgrade() throws IOException { writeAll(); LOG.info("\n Distributed upgrade for NameNode version " + upgradeManager.getUpgradeVersion() + " to current LV " - + FSConstants.LAYOUT_VERSION + " is initialized."); + + HdfsConstants.LAYOUT_VERSION + " is initialized."); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index fe32cdd63d..ed972c8f96 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -21,9 +21,7 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.net.URI; -import java.util.Arrays; import java.util.Collection; -import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -34,82 +32,37 @@ import org.apache.hadoop.ha.HealthCheckFailedException; import org.apache.hadoop.ha.ServiceFailedException; import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.fs.ContentSummary; -import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.FsServerDefaults; -import org.apache.hadoop.fs.Options; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Trash; -import org.apache.hadoop.fs.UnresolvedLinkException; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.fs.permission.PermissionStatus; import static org.apache.hadoop.hdfs.DFSConfigKeys.*; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HDFSPolicyProvider; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.ClientProtocol; -import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; -import org.apache.hadoop.hdfs.protocol.DatanodeID; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.DirectoryListing; -import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; -import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction; -import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; -import static org.apache.hadoop.hdfs.protocol.FSConstants.MAX_PATH_LENGTH; -import static org.apache.hadoop.hdfs.protocol.FSConstants.MAX_PATH_DEPTH; -import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; -import 
org.apache.hadoop.hdfs.protocol.LocatedBlock; -import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; -import org.apache.hadoop.hdfs.protocol.UnresolvedPathException; -import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; -import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; -import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; import org.apache.hadoop.hdfs.server.namenode.ha.ActiveState; import org.apache.hadoop.hdfs.server.namenode.ha.HAState; import org.apache.hadoop.hdfs.server.namenode.ha.StandbyState; import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; -import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; -import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; -import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.JournalProtocol; -import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; -import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.NodeRegistration; -import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo; -import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; -import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; -import org.apache.hadoop.io.EnumSetWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.ipc.ProtocolSignature; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.net.Node; -import org.apache.hadoop.security.AccessControlException; -import org.apache.hadoop.security.Groups; import org.apache.hadoop.security.RefreshUserMappingsProtocol; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authorize.AuthorizationException; -import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol; -import org.apache.hadoop.security.token.SecretManager.InvalidToken; -import org.apache.hadoop.security.token.Token; import org.apache.hadoop.tools.GetUserMappingsProtocol; import org.apache.hadoop.util.ServicePlugin; import org.apache.hadoop.util.StringUtils; @@ -152,7 +105,7 @@ * NameNode state, for example partial blocksMap etc. 
**********************************************************/ @InterfaceAudience.Private -public class NameNode implements NamenodeProtocols { +public class NameNode { static{ HdfsConfiguration.init(); } @@ -219,12 +172,6 @@ public long getProtocolVersion(String protocol, } - @Override // VersionedProtocol - public ProtocolSignature getProtocolSignature(String protocol, - long clientVersion, int clientMethodsHash) throws IOException { - return ProtocolSignature.getProtocolSignature( - this, protocol, clientVersion, clientMethodsHash); - } public static final int DEFAULT_PORT = 8020; @@ -239,18 +186,6 @@ public ProtocolSignature getProtocolSignature(String protocol, private final boolean haEnabled; - /** RPC server. Package-protected for use in tests. */ - Server server; - /** RPC server for HDFS Services communication. - BackupNode, Datanodes and all other services - should be connecting to this server if it is - configured. Clients should only go to NameNode#server - */ - protected Server serviceRpcServer; - /** RPC server address */ - protected InetSocketAddress rpcAddress = null; - /** RPC server for DN address */ - protected InetSocketAddress serviceRPCAddress = null; /** httpServer */ protected NameNodeHttpServer httpServer; private Thread emptier; @@ -258,11 +193,11 @@ public ProtocolSignature getProtocolSignature(String protocol, protected boolean stopRequested = false; /** Registration information of this name-node */ protected NamenodeRegistration nodeRegistration; - /** Is service level authorization enabled? */ - private boolean serviceAuthEnabled = false; /** Activated plug-ins. */ private List plugins; + private NameNodeRpcServer rpcServer; + /** Format a new filesystem. Destroys any filesystem that may already * exist at this location. **/ public static void format(Configuration conf) throws IOException { @@ -278,6 +213,10 @@ public FSNamesystem getNamesystem() { return namesystem; } + public NamenodeProtocols getRpcServer() { + return rpcServer; + } + static void initMetrics(Configuration conf, NamenodeRole role) { metrics = NameNodeMetrics.create(conf, role); } @@ -327,19 +266,19 @@ public static InetSocketAddress getAddress(Configuration conf) { * @param filesystemURI * @return address of file system */ - public static InetSocketAddress getAddress(URI filesystemURI) { + static InetSocketAddress getAddress(URI filesystemURI) { String authority = filesystemURI.getAuthority(); if (authority == null) { throw new IllegalArgumentException(String.format( "Invalid URI for NameNode address (check %s): %s has no authority.", FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString())); } - if (!FSConstants.HDFS_URI_SCHEME.equalsIgnoreCase( + if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase( filesystemURI.getScheme())) { throw new IllegalArgumentException(String.format( "Invalid URI for NameNode address (check %s): %s is not of scheme '%s'.", FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString(), - FSConstants.HDFS_URI_SCHEME)); + HdfsConstants.HDFS_URI_SCHEME)); } return getAddress(authority); } @@ -347,7 +286,7 @@ public static InetSocketAddress getAddress(URI filesystemURI) { public static URI getUri(InetSocketAddress namenode) { int port = namenode.getPort(); String portString = port == DEFAULT_PORT ? 
"" : (":"+port); - return URI.create(FSConstants.HDFS_URI_SCHEME + "://" + return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + namenode.getHostName()+portString); } @@ -385,11 +324,13 @@ protected InetSocketAddress getRpcServerAddress(Configuration conf) throws IOExc /** * Modifies the configuration passed to contain the service rpc address setting */ - protected void setRpcServiceServerAddress(Configuration conf) { + protected void setRpcServiceServerAddress(Configuration conf, + InetSocketAddress serviceRPCAddress) { setServiceAddress(conf, getHostPortString(serviceRPCAddress)); } - protected void setRpcServerAddress(Configuration conf) { + protected void setRpcServerAddress(Configuration conf, + InetSocketAddress rpcAddress) { FileSystem.setDefaultUri(conf, getUri(rpcAddress)); } @@ -404,7 +345,7 @@ protected void setHttpServerAddress(Configuration conf) { } protected void loadNamesystem(Configuration conf) throws IOException { - this.namesystem = new FSNamesystem(conf); + this.namesystem = FSNamesystem.loadFromDisk(conf); } NamenodeRegistration getRegistration() { @@ -413,7 +354,7 @@ NamenodeRegistration getRegistration() { NamenodeRegistration setRegistration() { nodeRegistration = new NamenodeRegistration( - getHostPortString(rpcAddress), + getHostPortString(rpcServer.getRpcAddress()), getHostPortString(getHttpAddress()), getFSImage().getStorage(), getRole()); return nodeRegistration; @@ -435,45 +376,13 @@ void loginAsNameNodeUser(Configuration conf) throws IOException { */ protected void initialize(Configuration conf) throws IOException { initializeGenericKeys(conf); - InetSocketAddress socAddr = getRpcServerAddress(conf); UserGroupInformation.setConfiguration(conf); loginAsNameNodeUser(conf); - int handlerCount = - conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY, - DFS_DATANODE_HANDLER_COUNT_DEFAULT); NameNode.initMetrics(conf, this.getRole()); loadNamesystem(conf); - // create rpc server - InetSocketAddress dnSocketAddr = getServiceRpcServerAddress(conf); - if (dnSocketAddr != null) { - int serviceHandlerCount = - conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY, - DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT); - this.serviceRpcServer = RPC.getServer(NamenodeProtocols.class, this, - dnSocketAddr.getHostName(), dnSocketAddr.getPort(), serviceHandlerCount, - false, conf, namesystem.getDelegationTokenSecretManager()); - this.serviceRPCAddress = this.serviceRpcServer.getListenerAddress(); - setRpcServiceServerAddress(conf); - } - this.server = RPC.getServer(NamenodeProtocols.class, this, - socAddr.getHostName(), socAddr.getPort(), - handlerCount, false, conf, - namesystem.getDelegationTokenSecretManager()); - // set service-level authorization security policy - if (serviceAuthEnabled = - conf.getBoolean( - CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) { - this.server.refreshServiceAcl(conf, new HDFSPolicyProvider()); - if (this.serviceRpcServer != null) { - this.serviceRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider()); - } - } - - // The rpc-server port can be ephemeral... ensure we have the correct info - this.rpcAddress = this.server.getListenerAddress(); - setRpcServerAddress(conf); + rpcServer = createRpcServer(conf); try { validateConfigurationSettings(conf); @@ -485,6 +394,15 @@ protected void initialize(Configuration conf) throws IOException { activate(conf); } + /** + * Create the RPC server implementation. Used as an extension point for the + * BackupNode. 
+ */ + protected NameNodeRpcServer createRpcServer(Configuration conf) + throws IOException { + return new NameNodeRpcServer(conf, this); + } + /** * Verifies that the final Configuration Settings look ok for the NameNode to * properly start up @@ -517,10 +435,7 @@ void activate(Configuration conf) throws IOException { } namesystem.activate(conf); startHttpServer(conf); - server.start(); //start RPC server - if (serviceRpcServer != null) { - serviceRpcServer.start(); - } + rpcServer.start(); startTrashEmptier(conf); plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY, @@ -532,9 +447,10 @@ void activate(Configuration conf) throws IOException { LOG.warn("ServicePlugin " + p + " could not be started", t); } } - LOG.info(getRole() + " up at: " + rpcAddress); - if (serviceRPCAddress != null) { - LOG.info(getRole() + " service server is up at: " + serviceRPCAddress); + + LOG.info(getRole() + " up at: " + rpcServer.getRpcAddress()); + if (rpcServer.getServiceRpcAddress() != null) { + LOG.info(getRole() + " service server is up at: " + rpcServer.getServiceRpcAddress()); } } @@ -605,7 +521,7 @@ protected NameNode(Configuration conf, NamenodeRole role) */ public void join() { try { - this.server.join(); + this.rpcServer.join(); } catch (InterruptedException ie) { } } @@ -635,8 +551,7 @@ public void stop() { } if(namesystem != null) namesystem.close(); if(emptier != null) emptier.interrupt(); - if(server != null) server.stop(); - if(serviceRpcServer != null) serviceRpcServer.stop(); + if(rpcServer != null) rpcServer.stop(); if (metrics != null) { metrics.shutdown(); } @@ -649,440 +564,6 @@ synchronized boolean isStopRequested() { return stopRequested; } - ///////////////////////////////////////////////////// - // NamenodeProtocol - ///////////////////////////////////////////////////// - @Override // NamenodeProtocol - public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size) - throws IOException { - if(size <= 0) { - throw new IllegalArgumentException( - "Unexpected not positive size: "+size); - } - - return namesystem.getBlockManager().getBlocks(datanode, size); - } - - @Override // NamenodeProtocol - public ExportedBlockKeys getBlockKeys() throws IOException { - return namesystem.getBlockManager().getBlockKeys(); - } - - @Override // NamenodeProtocol - public void errorReport(NamenodeRegistration registration, - int errorCode, - String msg) throws IOException { - checkOperation(OperationCategory.WRITE); - verifyRequest(registration); - LOG.info("Error report from " + registration + ": " + msg); - if(errorCode == FATAL) - namesystem.releaseBackupNode(registration); - } - - @Override // NamenodeProtocol - public NamenodeRegistration register(NamenodeRegistration registration) - throws IOException { - verifyVersion(registration.getVersion()); - NamenodeRegistration myRegistration = setRegistration(); - namesystem.registerBackupNode(registration, myRegistration); - return myRegistration; - } - - @Override // NamenodeProtocol - public NamenodeCommand startCheckpoint(NamenodeRegistration registration) - throws IOException { - verifyRequest(registration); - if(!isRole(NamenodeRole.NAMENODE)) - throw new IOException("Only an ACTIVE node can invoke startCheckpoint."); - return namesystem.startCheckpoint(registration, setRegistration()); - } - - @Override // NamenodeProtocol - public void endCheckpoint(NamenodeRegistration registration, - CheckpointSignature sig) throws IOException { - checkOperation(OperationCategory.CHECKPOINT); - namesystem.endCheckpoint(registration, sig); - } - - @Override 
// ClientProtocol - public Token getDelegationToken(Text renewer) - throws IOException { - checkOperation(OperationCategory.WRITE); - return namesystem.getDelegationToken(renewer); - } - - @Override // ClientProtocol - public long renewDelegationToken(Token token) - throws InvalidToken, IOException { - checkOperation(OperationCategory.WRITE); - return namesystem.renewDelegationToken(token); - } - - @Override // ClientProtocol - public void cancelDelegationToken(Token token) - throws IOException { - checkOperation(OperationCategory.WRITE); - namesystem.cancelDelegationToken(token); - } - - @Override // ClientProtocol - public LocatedBlocks getBlockLocations(String src, - long offset, - long length) - throws IOException { - checkOperation(OperationCategory.READ); - metrics.incrGetBlockLocations(); - return namesystem.getBlockLocations(getClientMachine(), - src, offset, length); - } - - @Override // ClientProtocol - public FsServerDefaults getServerDefaults() throws IOException { - return namesystem.getServerDefaults(); - } - - @Override // ClientProtocol - public void create(String src, - FsPermission masked, - String clientName, - EnumSetWritable flag, - boolean createParent, - short replication, - long blockSize) throws IOException { - checkOperation(OperationCategory.WRITE); - String clientMachine = getClientMachine(); - if (stateChangeLog.isDebugEnabled()) { - stateChangeLog.debug("*DIR* NameNode.create: file " - +src+" for "+clientName+" at "+clientMachine); - } - if (!checkPathLength(src)) { - throw new IOException("create: Pathname too long. Limit " - + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels."); - } - namesystem.startFile(src, - new PermissionStatus(UserGroupInformation.getCurrentUser().getShortUserName(), - null, masked), - clientName, clientMachine, flag.get(), createParent, replication, blockSize); - metrics.incrFilesCreated(); - metrics.incrCreateFileOps(); - } - - @Override // ClientProtocol - public LocatedBlock append(String src, String clientName) - throws IOException { - checkOperation(OperationCategory.WRITE); - String clientMachine = getClientMachine(); - if (stateChangeLog.isDebugEnabled()) { - stateChangeLog.debug("*DIR* NameNode.append: file " - +src+" for "+clientName+" at "+clientMachine); - } - LocatedBlock info = namesystem.appendFile(src, clientName, clientMachine); - metrics.incrFilesAppended(); - return info; - } - - @Override // ClientProtocol - public boolean recoverLease(String src, String clientName) throws IOException { - checkOperation(OperationCategory.WRITE); - String clientMachine = getClientMachine(); - return namesystem.recoverLease(src, clientName, clientMachine); - } - - @Override // ClientProtocol - public boolean setReplication(String src, short replication) - throws IOException { - checkOperation(OperationCategory.WRITE); - return namesystem.setReplication(src, replication); - } - - @Override // ClientProtocol - public void setPermission(String src, FsPermission permissions) - throws IOException { - checkOperation(OperationCategory.WRITE); - namesystem.setPermission(src, permissions); - } - - @Override // ClientProtocol - public void setOwner(String src, String username, String groupname) - throws IOException { - checkOperation(OperationCategory.WRITE); - namesystem.setOwner(src, username, groupname); - } - - @Override // ClientProtocol - public LocatedBlock addBlock(String src, - String clientName, - ExtendedBlock previous, - DatanodeInfo[] excludedNodes) - throws IOException { - checkOperation(OperationCategory.WRITE); - 
if(stateChangeLog.isDebugEnabled()) { - stateChangeLog.debug("*BLOCK* NameNode.addBlock: file " - +src+" for "+clientName); - } - HashMap excludedNodesSet = null; - if (excludedNodes != null) { - excludedNodesSet = new HashMap(excludedNodes.length); - for (Node node:excludedNodes) { - excludedNodesSet.put(node, node); - } - } - LocatedBlock locatedBlock = - namesystem.getAdditionalBlock(src, clientName, previous, excludedNodesSet); - if (locatedBlock != null) - metrics.incrAddBlockOps(); - return locatedBlock; - } - - @Override // ClientProtocol - public LocatedBlock getAdditionalDatanode(final String src, final ExtendedBlock blk, - final DatanodeInfo[] existings, final DatanodeInfo[] excludes, - final int numAdditionalNodes, final String clientName - ) throws IOException { - checkOperation(OperationCategory.WRITE); - if (LOG.isDebugEnabled()) { - LOG.debug("getAdditionalDatanode: src=" + src - + ", blk=" + blk - + ", existings=" + Arrays.asList(existings) - + ", excludes=" + Arrays.asList(excludes) - + ", numAdditionalNodes=" + numAdditionalNodes - + ", clientName=" + clientName); - } - - metrics.incrGetAdditionalDatanodeOps(); - - HashMap excludeSet = null; - if (excludes != null) { - excludeSet = new HashMap(excludes.length); - for (Node node : excludes) { - excludeSet.put(node, node); - } - } - return namesystem.getAdditionalDatanode(src, blk, - existings, excludeSet, numAdditionalNodes, clientName); - } - - /** - * The client needs to give up on the block. - */ - @Override // ClientProtocol - public void abandonBlock(ExtendedBlock b, String src, String holder) - throws IOException { - checkOperation(OperationCategory.WRITE); - if(stateChangeLog.isDebugEnabled()) { - stateChangeLog.debug("*BLOCK* NameNode.abandonBlock: " - +b+" of file "+src); - } - if (!namesystem.abandonBlock(b, src, holder)) { - throw new IOException("Cannot abandon block during write to " + src); - } - } - - @Override // ClientProtocol - public boolean complete(String src, String clientName, ExtendedBlock last) - throws IOException { - checkOperation(OperationCategory.WRITE); - if(stateChangeLog.isDebugEnabled()) { - stateChangeLog.debug("*DIR* NameNode.complete: " - + src + " for " + clientName); - } - return namesystem.completeFile(src, clientName, last); - } - - /** - * The client has detected an error on the specified located blocks - * and is reporting them to the server. For now, the namenode will - * mark the block as corrupt. In the future we might - * check the blocks are actually corrupt. 
- */ - @Override // ClientProtocol, DatanodeProtocol - public void reportBadBlocks(LocatedBlock[] blocks) throws IOException { - checkOperation(OperationCategory.WRITE); - stateChangeLog.info("*DIR* NameNode.reportBadBlocks"); - for (int i = 0; i < blocks.length; i++) { - ExtendedBlock blk = blocks[i].getBlock(); - DatanodeInfo[] nodes = blocks[i].getLocations(); - for (int j = 0; j < nodes.length; j++) { - DatanodeInfo dn = nodes[j]; - namesystem.getBlockManager().findAndMarkBlockAsCorrupt(blk, dn); - } - } - } - - @Override // ClientProtocol - public LocatedBlock updateBlockForPipeline(ExtendedBlock block, String clientName) - throws IOException { - checkOperation(OperationCategory.WRITE); - return namesystem.updateBlockForPipeline(block, clientName); - } - - - @Override // ClientProtocol - public void updatePipeline(String clientName, ExtendedBlock oldBlock, - ExtendedBlock newBlock, DatanodeID[] newNodes) - throws IOException { - checkOperation(OperationCategory.WRITE); - namesystem.updatePipeline(clientName, oldBlock, newBlock, newNodes); - } - - @Override // DatanodeProtocol - public void commitBlockSynchronization(ExtendedBlock block, - long newgenerationstamp, long newlength, - boolean closeFile, boolean deleteblock, DatanodeID[] newtargets) - throws IOException { - checkOperation(OperationCategory.WRITE); - namesystem.commitBlockSynchronization(block, - newgenerationstamp, newlength, closeFile, deleteblock, newtargets); - } - - @Override // ClientProtocol - public long getPreferredBlockSize(String filename) - throws IOException { - checkOperation(OperationCategory.READ); - return namesystem.getPreferredBlockSize(filename); - } - - @Deprecated - @Override // ClientProtocol - public boolean rename(String src, String dst) throws IOException { - checkOperation(OperationCategory.WRITE); - if(stateChangeLog.isDebugEnabled()) { - stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst); - } - if (!checkPathLength(dst)) { - throw new IOException("rename: Pathname too long. Limit " - + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels."); - } - boolean ret = namesystem.renameTo(src, dst); - if (ret) { - metrics.incrFilesRenamed(); - } - return ret; - } - - @Override // ClientProtocol - public void concat(String trg, String[] src) throws IOException { - checkOperation(OperationCategory.WRITE); - namesystem.concat(trg, src); - } - - @Override // ClientProtocol - public void rename(String src, String dst, Options.Rename... options) - throws IOException { - checkOperation(OperationCategory.WRITE); - if(stateChangeLog.isDebugEnabled()) { - stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst); - } - if (!checkPathLength(dst)) { - throw new IOException("rename: Pathname too long. Limit " - + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels."); - } - namesystem.renameTo(src, dst, options); - metrics.incrFilesRenamed(); - } - - @Deprecated - @Override // ClientProtocol - public boolean delete(String src) throws IOException { - checkOperation(OperationCategory.WRITE); - return delete(src, true); - } - - @Override // ClientProtocol - public boolean delete(String src, boolean recursive) throws IOException { - checkOperation(OperationCategory.WRITE); - if (stateChangeLog.isDebugEnabled()) { - stateChangeLog.debug("*DIR* Namenode.delete: src=" + src - + ", recursive=" + recursive); - } - boolean ret = namesystem.delete(src, recursive); - if (ret) - metrics.incrDeleteFileOps(); - return ret; - } - - /** - * Check path length does not exceed maximum. 
Returns true if - * length and depth are okay. Returns false if length is too long - * or depth is too great. - */ - private boolean checkPathLength(String src) { - Path srcPath = new Path(src); - return (src.length() <= MAX_PATH_LENGTH && - srcPath.depth() <= MAX_PATH_DEPTH); - } - - @Override // ClientProtocol - public boolean mkdirs(String src, FsPermission masked, boolean createParent) - throws IOException { - checkOperation(OperationCategory.WRITE); - if(stateChangeLog.isDebugEnabled()) { - stateChangeLog.debug("*DIR* NameNode.mkdirs: " + src); - } - if (!checkPathLength(src)) { - throw new IOException("mkdirs: Pathname too long. Limit " - + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels."); - } - return namesystem.mkdirs(src, - new PermissionStatus(UserGroupInformation.getCurrentUser().getShortUserName(), - null, masked), createParent); - } - - @Override // ClientProtocol - public void renewLease(String clientName) throws IOException { - checkOperation(OperationCategory.WRITE); - namesystem.renewLease(clientName); - } - - @Override // ClientProtocol - public DirectoryListing getListing(String src, byte[] startAfter, - boolean needLocation) throws IOException { - checkOperation(OperationCategory.READ); - DirectoryListing files = namesystem.getListing( - src, startAfter, needLocation); - if (files != null) { - metrics.incrGetListingOps(); - metrics.incrFilesInGetListingOps(files.getPartialListing().length); - } - return files; - } - - @Override // ClientProtocol - public HdfsFileStatus getFileInfo(String src) throws IOException { - checkOperation(OperationCategory.READ); - metrics.incrFileInfoOps(); - return namesystem.getFileInfo(src, true); - } - - @Override // ClientProtocol - public HdfsFileStatus getFileLinkInfo(String src) throws IOException { - checkOperation(OperationCategory.READ); - metrics.incrFileInfoOps(); - return namesystem.getFileInfo(src, false); - } - - @Override - public long[] getStats() { - return namesystem.getStats(); - } - - @Override // ClientProtocol - public DatanodeInfo[] getDatanodeReport(DatanodeReportType type) - throws IOException { - checkOperation(OperationCategory.READ); - DatanodeInfo results[] = namesystem.datanodeReport(type); - if (results == null ) { - throw new IOException("Cannot find datanode report"); - } - return results; - } - - @Override // ClientProtocol - public boolean setSafeMode(SafeModeAction action) throws IOException { - // TODO:HA decide on OperationCategory for this - return namesystem.setSafeMode(action); - } - /** * Is the cluster currently in safe mode? 
*/ @@ -1090,275 +571,8 @@ public boolean isInSafeMode() { return namesystem.isInSafeMode(); } - @Override // ClientProtocol - public boolean restoreFailedStorage(String arg) - throws AccessControlException { - // TODO:HA decide on OperationCategory for this - return namesystem.restoreFailedStorage(arg); - } - - @Override // ClientProtocol - public void saveNamespace() throws IOException { - // TODO:HA decide on OperationCategory for this - namesystem.saveNamespace(); - } - - @Override // ClientProtocol - public void refreshNodes() throws IOException { - // TODO:HA decide on OperationCategory for this - namesystem.getBlockManager().getDatanodeManager().refreshNodes( - new HdfsConfiguration()); - } - - @Override // NamenodeProtocol - public long getTransactionID() { - // TODO:HA decide on OperationCategory for this - return namesystem.getEditLog().getSyncTxId(); - } - - @Override // NamenodeProtocol - public CheckpointSignature rollEditLog() throws IOException { - // TODO:HA decide on OperationCategory for this - return namesystem.rollEditLog(); - } - - @Override // NamenodeProtocol - public RemoteEditLogManifest getEditLogManifest(long sinceTxId) - throws IOException { - // TODO:HA decide on OperationCategory for this - return namesystem.getEditLog().getEditLogManifest(sinceTxId); - } - - @Override // ClientProtocol - public void finalizeUpgrade() throws IOException { - // TODO:HA decide on OperationCategory for this - namesystem.finalizeUpgrade(); - } - - @Override // ClientProtocol - public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action) - throws IOException { - // TODO:HA decide on OperationCategory for this - return namesystem.distributedUpgradeProgress(action); - } - - @Override // ClientProtocol - public void metaSave(String filename) throws IOException { - // TODO:HA decide on OperationCategory for this - namesystem.metaSave(filename); - } - - @Override // ClientProtocol - public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie) - throws IOException { - checkOperation(OperationCategory.READ); - Collection fbs = - namesystem.listCorruptFileBlocks(path, cookie); - - String[] files = new String[fbs.size()]; - String lastCookie = ""; - int i = 0; - for(FSNamesystem.CorruptFileBlockInfo fb: fbs) { - files[i++] = fb.path; - lastCookie = fb.block.getBlockName(); - } - return new CorruptFileBlocks(files, lastCookie); - } - - /** - * Tell all datanodes to use a new, non-persistent bandwidth value for - * dfs.datanode.balance.bandwidthPerSec. - * @param bandwidth Blanacer bandwidth in bytes per second for all datanodes. 
- * @throws IOException - */ - @Override // ClientProtocol - public void setBalancerBandwidth(long bandwidth) throws IOException { - // TODO:HA decide on OperationCategory for this - namesystem.getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth); - } - - @Override // ClientProtocol - public ContentSummary getContentSummary(String path) throws IOException { - checkOperation(OperationCategory.READ); - return namesystem.getContentSummary(path); - } - - @Override // ClientProtocol - public void setQuota(String path, long namespaceQuota, long diskspaceQuota) - throws IOException { - checkOperation(OperationCategory.WRITE); - namesystem.setQuota(path, namespaceQuota, diskspaceQuota); - } - - @Override // ClientProtocol - public void fsync(String src, String clientName) throws IOException { - checkOperation(OperationCategory.WRITE); - namesystem.fsync(src, clientName); - } - - @Override // ClientProtocol - public void setTimes(String src, long mtime, long atime) - throws IOException { - checkOperation(OperationCategory.WRITE); - namesystem.setTimes(src, mtime, atime); - } - - @Override // ClientProtocol - public void createSymlink(String target, String link, FsPermission dirPerms, - boolean createParent) throws IOException { - checkOperation(OperationCategory.WRITE); - metrics.incrCreateSymlinkOps(); - /* We enforce the MAX_PATH_LENGTH limit even though a symlink target - * URI may refer to a non-HDFS file system. - */ - if (!checkPathLength(link)) { - throw new IOException("Symlink path exceeds " + MAX_PATH_LENGTH + - " character limit"); - - } - if ("".equals(target)) { - throw new IOException("Invalid symlink target"); - } - final UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - namesystem.createSymlink(target, link, - new PermissionStatus(ugi.getShortUserName(), null, dirPerms), createParent); - } - - @Override // ClientProtocol - public String getLinkTarget(String path) throws IOException { - checkOperation(OperationCategory.READ); - metrics.incrGetLinkTargetOps(); - /* Resolves the first symlink in the given path, returning a - * new path consisting of the target of the symlink and any - * remaining path components from the original path. 
- */ - try { - HdfsFileStatus stat = namesystem.getFileInfo(path, false); - if (stat != null) { - // NB: getSymlink throws IOException if !stat.isSymlink() - return stat.getSymlink(); - } - } catch (UnresolvedPathException e) { - return e.getResolvedPath().toString(); - } catch (UnresolvedLinkException e) { - // The NameNode should only throw an UnresolvedPathException - throw new AssertionError("UnresolvedLinkException thrown"); - } - return null; - } - - - @Override // DatanodeProtocol - public DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg) - throws IOException { - verifyVersion(nodeReg.getVersion()); - namesystem.registerDatanode(nodeReg); - - return nodeReg; - } - - @Override // DatanodeProtocol - public DatanodeCommand[] sendHeartbeat(DatanodeRegistration nodeReg, - long capacity, long dfsUsed, long remaining, long blockPoolUsed, - int xmitsInProgress, int xceiverCount, int failedVolumes) - throws IOException { - verifyRequest(nodeReg); - return namesystem.handleHeartbeat(nodeReg, capacity, dfsUsed, remaining, - blockPoolUsed, xceiverCount, xmitsInProgress, failedVolumes); - } - - @Override // DatanodeProtocol - public DatanodeCommand blockReport(DatanodeRegistration nodeReg, - String poolId, long[] blocks) throws IOException { - verifyRequest(nodeReg); - BlockListAsLongs blist = new BlockListAsLongs(blocks); - if(stateChangeLog.isDebugEnabled()) { - stateChangeLog.debug("*BLOCK* NameNode.blockReport: " - + "from " + nodeReg.getName() + " " + blist.getNumberOfBlocks() - + " blocks"); - } - - namesystem.getBlockManager().processReport(nodeReg, poolId, blist); - if (getFSImage().isUpgradeFinalized()) - return new DatanodeCommand.Finalize(poolId); - return null; - } - - @Override // DatanodeProtocol - public void blockReceivedAndDeleted(DatanodeRegistration nodeReg, String poolId, - ReceivedDeletedBlockInfo[] receivedAndDeletedBlocks) throws IOException { - verifyRequest(nodeReg); - if(stateChangeLog.isDebugEnabled()) { - stateChangeLog.debug("*BLOCK* NameNode.blockReceivedAndDeleted: " - +"from "+nodeReg.getName()+" "+receivedAndDeletedBlocks.length - +" blocks."); - } - namesystem.getBlockManager().blockReceivedAndDeleted( - nodeReg, poolId, receivedAndDeletedBlocks); - } - - @Override // DatanodeProtocol - public void errorReport(DatanodeRegistration nodeReg, - int errorCode, String msg) throws IOException { - String dnName = (nodeReg == null ? "unknown DataNode" : nodeReg.getName()); - - if (errorCode == DatanodeProtocol.NOTIFY) { - LOG.info("Error report from " + dnName + ": " + msg); - return; - } - verifyRequest(nodeReg); - - if (errorCode == DatanodeProtocol.DISK_ERROR) { - LOG.warn("Disk error on " + dnName + ": " + msg); - } else if (errorCode == DatanodeProtocol.FATAL_DISK_ERROR) { - LOG.warn("Fatal disk error on " + dnName + ": " + msg); - namesystem.getBlockManager().getDatanodeManager().removeDatanode(nodeReg); - } else { - LOG.info("Error report from " + dnName + ": " + msg); - } - } - - @Override // DatanodeProtocol, NamenodeProtocol - public NamespaceInfo versionRequest() throws IOException { - return namesystem.getNamespaceInfo(); - } - - @Override // DatanodeProtocol - public UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOException { - return namesystem.processDistributedUpgradeCommand(comm); - } - - /** - * Verify request. - * - * Verifies correctness of the datanode version, registration ID, and - * if the datanode does not need to be shutdown. 
- * - * @param nodeReg data node registration - * @throws IOException - */ - public void verifyRequest(NodeRegistration nodeReg) throws IOException { - verifyVersion(nodeReg.getVersion()); - if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID())) { - LOG.warn("Invalid registrationID - expected: " - + namesystem.getRegistrationID() + " received: " - + nodeReg.getRegistrationID()); - throw new UnregisteredNodeException(nodeReg); - } - } - - /** - * Verify version. - * - * @param version - * @throws IOException - */ - public void verifyVersion(int version) throws IOException { - if (version != FSConstants.LAYOUT_VERSION) - throw new IncorrectVersionException(version, "data node"); - } - - public FSImage getFSImage() { + /** get FSImage */ + FSImage getFSImage() { return namesystem.dir.fsImage; } @@ -1367,7 +581,7 @@ public FSImage getFSImage() { * @return namenode rpc address */ public InetSocketAddress getNameNodeAddress() { - return rpcAddress; + return rpcServer.getRpcAddress(); } /** @@ -1376,7 +590,7 @@ public InetSocketAddress getNameNodeAddress() { * @return namenode service rpc address used by datanodes */ public InetSocketAddress getServiceRpcAddress() { - return serviceRPCAddress != null ? serviceRPCAddress : rpcAddress; + return rpcServer.getServiceRpcAddress() != null ? rpcServer.getServiceRpcAddress() : rpcServer.getRpcAddress(); } /** @@ -1437,16 +651,16 @@ private static boolean format(Configuration conf, } System.out.println("Formatting using clusterid: " + clusterId); - FSImage fsImage = new FSImage(conf, null, dirsToFormat, editDirsToFormat); - FSNamesystem nsys = new FSNamesystem(fsImage, conf); - nsys.dir.fsImage.format(clusterId); + FSImage fsImage = new FSImage(conf, dirsToFormat, editDirsToFormat); + FSNamesystem fsn = new FSNamesystem(conf, fsImage); + fsImage.format(fsn, clusterId); return false; } private static boolean finalize(Configuration conf, boolean isConfirmationNeeded ) throws IOException { - FSNamesystem nsys = new FSNamesystem(new FSImage(conf), conf); + FSNamesystem nsys = new FSNamesystem(conf, new FSImage(conf)); System.err.print( "\"finalize\" will remove the previous state of the files system.\n" + "Recent upgrade will become permanent.\n" @@ -1461,40 +675,6 @@ private static boolean finalize(Configuration conf, return false; } - @Override // RefreshAuthorizationPolicyProtocol - public void refreshServiceAcl() throws IOException { - if (!serviceAuthEnabled) { - throw new AuthorizationException("Service Level Authorization not enabled!"); - } - - this.server.refreshServiceAcl(new Configuration(), new HDFSPolicyProvider()); - if (this.serviceRpcServer != null) { - this.serviceRpcServer.refreshServiceAcl(new Configuration(), new HDFSPolicyProvider()); - } - } - - @Override // RefreshAuthorizationPolicyProtocol - public void refreshUserToGroupsMappings() throws IOException { - LOG.info("Refreshing all user-to-groups mappings. 
Requested by user: " + - UserGroupInformation.getCurrentUser().getShortUserName()); - Groups.getUserToGroupsMappingService().refresh(); - } - - @Override // RefreshAuthorizationPolicyProtocol - public void refreshSuperUserGroupsConfiguration() { - LOG.info("Refreshing SuperUser proxy group mapping list "); - - ProxyUsers.refreshSuperUserGroupsConfiguration(); - } - - @Override // GetUserMappingsProtocol - public String[] getGroupsForUser(String user) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("Getting groups for user " + user); - } - return UserGroupInformation.createRemoteUser(user).getGroupNames(); - } - private static void printUsage() { System.err.println( "Usage: java NameNode [" + @@ -1647,7 +827,7 @@ public static void initializeGenericKeys(Configuration conf) { DFSUtil.setGenericConf(conf, nameserviceId, NAMESERVICE_SPECIFIC_KEYS); if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) { - URI defaultUri = URI.create(FSConstants.HDFS_URI_SCHEME + "://" + URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY)); conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString()); } @@ -1666,17 +846,8 @@ public static void main(String argv[]) throws Exception { System.exit(-1); } } - - private static String getClientMachine() { - String clientMachine = Server.getRemoteAddress(); - if (clientMachine == null) { - clientMachine = ""; - } - return clientMachine; - } - - @Override // HAServiceProtocol - public synchronized void monitorHealth() throws HealthCheckFailedException { + + synchronized void monitorHealth() throws HealthCheckFailedException { if (!haEnabled) { return; // no-op, if HA is not eanbled } @@ -1684,16 +855,14 @@ public synchronized void monitorHealth() throws HealthCheckFailedException { return; } - @Override // HAServiceProtocol - public synchronized void transitionToActive() throws ServiceFailedException { + synchronized void transitionToActive() throws ServiceFailedException { if (!haEnabled) { throw new ServiceFailedException("HA for namenode is not enabled"); } state.setState(this, ACTIVE_STATE); } - @Override // HAServiceProtocol - public synchronized void transitionToStandby() throws ServiceFailedException { + synchronized void transitionToStandby() throws ServiceFailedException { if (!haEnabled) { throw new ServiceFailedException("HA for namenode is not enabled"); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java index a15cdecabc..24f999e170 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java @@ -62,7 +62,7 @@ public NameNodeResourceChecker(Configuration conf) throws IOException { duReserved = conf.getLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_DEFAULT); - + Collection extraCheckedVolumes = Util.stringCollectionAsURIs(conf .getTrimmedStringCollection(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java new file mode 100644 index 0000000000..7fdf3e60d3 --- 
/dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -0,0 +1,980 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import static org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_PATH_DEPTH; +import static org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_PATH_LENGTH; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; + +import org.apache.commons.logging.Log; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.CreateFlag; +import org.apache.hadoop.fs.FsServerDefaults; +import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.UnresolvedLinkException; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.PermissionStatus; +import static org.apache.hadoop.hdfs.DFSConfigKeys.*; + +import org.apache.hadoop.ha.HealthCheckFailedException; +import org.apache.hadoop.ha.ServiceFailedException; +import org.apache.hadoop.hdfs.HDFSPolicyProvider; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; +import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; +import org.apache.hadoop.hdfs.protocol.UnresolvedPathException; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction; +import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; +import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; +import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; +import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; +import 
org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; +import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; +import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; +import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; +import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; +import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.hdfs.server.protocol.NodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo; +import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; +import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; +import org.apache.hadoop.io.EnumSetWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.ipc.ProtocolSignature; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.net.Node; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.Groups; +import org.apache.hadoop.security.RefreshUserMappingsProtocol; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.SecretManager.InvalidToken; +import org.apache.hadoop.tools.GetUserMappingsProtocol; + +/** + * This class is responsible for handling all of the RPC calls to the NameNode. + * It is created, started, and stopped by {@link NameNode}. + */ +class NameNodeRpcServer implements NamenodeProtocols { + + private static final Log LOG = NameNode.LOG; + private static final Log stateChangeLog = NameNode.stateChangeLog; + + // Dependencies from other parts of NN. 
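+  // Illustrative sketch, not part of the original change: how a NameNode-like
+  // caller is expected to drive this class, per the class javadoc above. The
+  // names conf and nn stand in for a Configuration and the owning NameNode;
+  // only the constructor and the start()/join()/stop() methods defined below
+  // are used.
+  //
+  //   NameNodeRpcServer rpcServer = new NameNodeRpcServer(conf, nn);
+  //   rpcServer.start();  // starts the client RPC server and, when a service
+  //                       // RPC address is configured, a second server that
+  //                       // handles DataNode traffic separately from clients
+  //   rpcServer.join();   // blocks until stop() (invoked from a shutdown path)
+  //                       // has brought both RPC servers down
+  //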
+ private final FSNamesystem namesystem; + protected final NameNode nn; + private final NameNodeMetrics metrics; + + private final boolean serviceAuthEnabled; + + /** The RPC server that listens to requests from DataNodes */ + private final RPC.Server serviceRpcServer; + private final InetSocketAddress serviceRPCAddress; + + /** The RPC server that listens to requests from clients */ + protected final RPC.Server server; + protected final InetSocketAddress rpcAddress; + + public NameNodeRpcServer(Configuration conf, NameNode nn) + throws IOException { + this.nn = nn; + this.namesystem = nn.getNamesystem(); + this.metrics = NameNode.getNameNodeMetrics(); + + int handlerCount = + conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY, + DFS_DATANODE_HANDLER_COUNT_DEFAULT); + InetSocketAddress socAddr = nn.getRpcServerAddress(conf); + + InetSocketAddress dnSocketAddr = nn.getServiceRpcServerAddress(conf); + if (dnSocketAddr != null) { + int serviceHandlerCount = + conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY, + DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT); + this.serviceRpcServer = RPC.getServer(NamenodeProtocols.class, this, + dnSocketAddr.getHostName(), dnSocketAddr.getPort(), serviceHandlerCount, + false, conf, namesystem.getDelegationTokenSecretManager()); + this.serviceRPCAddress = this.serviceRpcServer.getListenerAddress(); + nn.setRpcServiceServerAddress(conf, serviceRPCAddress); + } else { + serviceRpcServer = null; + serviceRPCAddress = null; + } + this.server = RPC.getServer(NamenodeProtocols.class, this, + socAddr.getHostName(), socAddr.getPort(), + handlerCount, false, conf, + namesystem.getDelegationTokenSecretManager()); + + // set service-level authorization security policy + if (serviceAuthEnabled = + conf.getBoolean( + CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) { + this.server.refreshServiceAcl(conf, new HDFSPolicyProvider()); + if (this.serviceRpcServer != null) { + this.serviceRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider()); + } + } + + // The rpc-server port can be ephemeral... ensure we have the correct info + this.rpcAddress = this.server.getListenerAddress(); + nn.setRpcServerAddress(conf, rpcAddress); + } + + /** + * Actually start serving requests. + */ + void start() { + server.start(); //start RPC server + if (serviceRpcServer != null) { + serviceRpcServer.start(); + } + } + + /** + * Wait until the RPC server has shut down. 
+ */ + void join() throws InterruptedException { + this.server.join(); + } + + void stop() { + if(server != null) server.stop(); + if(serviceRpcServer != null) serviceRpcServer.stop(); + } + + InetSocketAddress getServiceRpcAddress() { + return serviceRPCAddress; + } + + InetSocketAddress getRpcAddress() { + return rpcAddress; + } + + @Override // VersionedProtocol + public ProtocolSignature getProtocolSignature(String protocol, + long clientVersion, int clientMethodsHash) throws IOException { + return ProtocolSignature.getProtocolSignature( + this, protocol, clientVersion, clientMethodsHash); + } + + @Override + public long getProtocolVersion(String protocol, + long clientVersion) throws IOException { + if (protocol.equals(ClientProtocol.class.getName())) { + return ClientProtocol.versionID; + } else if (protocol.equals(DatanodeProtocol.class.getName())){ + return DatanodeProtocol.versionID; + } else if (protocol.equals(NamenodeProtocol.class.getName())){ + return NamenodeProtocol.versionID; + } else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())){ + return RefreshAuthorizationPolicyProtocol.versionID; + } else if (protocol.equals(RefreshUserMappingsProtocol.class.getName())){ + return RefreshUserMappingsProtocol.versionID; + } else if (protocol.equals(GetUserMappingsProtocol.class.getName())){ + return GetUserMappingsProtocol.versionID; + } else { + throw new IOException("Unknown protocol to name node: " + protocol); + } + } + + ///////////////////////////////////////////////////// + // NamenodeProtocol + ///////////////////////////////////////////////////// + @Override // NamenodeProtocol + public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size) + throws IOException { + if(size <= 0) { + throw new IllegalArgumentException( + "Unexpected not positive size: "+size); + } + + return namesystem.getBlockManager().getBlocks(datanode, size); + } + + @Override // NamenodeProtocol + public ExportedBlockKeys getBlockKeys() throws IOException { + return namesystem.getBlockManager().getBlockKeys(); + } + + @Override // NamenodeProtocol + public void errorReport(NamenodeRegistration registration, + int errorCode, + String msg) throws IOException { + nn.checkOperation(OperationCategory.WRITE); + verifyRequest(registration); + LOG.info("Error report from " + registration + ": " + msg); + if(errorCode == FATAL) + namesystem.releaseBackupNode(registration); + } + + @Override // NamenodeProtocol + public NamenodeRegistration register(NamenodeRegistration registration) + throws IOException { + verifyVersion(registration.getVersion()); + NamenodeRegistration myRegistration = nn.setRegistration(); + namesystem.registerBackupNode(registration, myRegistration); + return myRegistration; + } + + @Override // NamenodeProtocol + public NamenodeCommand startCheckpoint(NamenodeRegistration registration) + throws IOException { + verifyRequest(registration); + if(!nn.isRole(NamenodeRole.NAMENODE)) + throw new IOException("Only an ACTIVE node can invoke startCheckpoint."); + return namesystem.startCheckpoint(registration, nn.setRegistration()); + } + + @Override // NamenodeProtocol + public void endCheckpoint(NamenodeRegistration registration, + CheckpointSignature sig) throws IOException { + nn.checkOperation(OperationCategory.CHECKPOINT); + namesystem.endCheckpoint(registration, sig); + } + + @Override // ClientProtocol + public Token getDelegationToken(Text renewer) + throws IOException { + nn.checkOperation(OperationCategory.WRITE); + return 
namesystem.getDelegationToken(renewer); + } + + @Override // ClientProtocol + public long renewDelegationToken(Token token) + throws InvalidToken, IOException { + nn.checkOperation(OperationCategory.WRITE); + return namesystem.renewDelegationToken(token); + } + + @Override // ClientProtocol + public void cancelDelegationToken(Token token) + throws IOException { + nn.checkOperation(OperationCategory.WRITE); + namesystem.cancelDelegationToken(token); + } + + @Override // ClientProtocol + public LocatedBlocks getBlockLocations(String src, + long offset, + long length) + throws IOException { + nn.checkOperation(OperationCategory.READ); + metrics.incrGetBlockLocations(); + return namesystem.getBlockLocations(getClientMachine(), + src, offset, length); + } + + @Override // ClientProtocol + public FsServerDefaults getServerDefaults() throws IOException { + return namesystem.getServerDefaults(); + } + + @Override // ClientProtocol + public void create(String src, + FsPermission masked, + String clientName, + EnumSetWritable flag, + boolean createParent, + short replication, + long blockSize) throws IOException { + nn.checkOperation(OperationCategory.WRITE); + String clientMachine = getClientMachine(); + if (stateChangeLog.isDebugEnabled()) { + stateChangeLog.debug("*DIR* NameNode.create: file " + +src+" for "+clientName+" at "+clientMachine); + } + if (!checkPathLength(src)) { + throw new IOException("create: Pathname too long. Limit " + + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels."); + } + namesystem.startFile(src, + new PermissionStatus(UserGroupInformation.getCurrentUser().getShortUserName(), + null, masked), + clientName, clientMachine, flag.get(), createParent, replication, blockSize); + metrics.incrFilesCreated(); + metrics.incrCreateFileOps(); + } + + @Override // ClientProtocol + public LocatedBlock append(String src, String clientName) + throws IOException { + nn.checkOperation(OperationCategory.WRITE); + String clientMachine = getClientMachine(); + if (stateChangeLog.isDebugEnabled()) { + stateChangeLog.debug("*DIR* NameNode.append: file " + +src+" for "+clientName+" at "+clientMachine); + } + LocatedBlock info = namesystem.appendFile(src, clientName, clientMachine); + metrics.incrFilesAppended(); + return info; + } + + @Override // ClientProtocol + public boolean recoverLease(String src, String clientName) throws IOException { + nn.checkOperation(OperationCategory.WRITE); + String clientMachine = getClientMachine(); + return namesystem.recoverLease(src, clientName, clientMachine); + } + + @Override // ClientProtocol + public boolean setReplication(String src, short replication) + throws IOException { + nn.checkOperation(OperationCategory.WRITE); + return namesystem.setReplication(src, replication); + } + + @Override // ClientProtocol + public void setPermission(String src, FsPermission permissions) + throws IOException { + nn.checkOperation(OperationCategory.WRITE); + namesystem.setPermission(src, permissions); + } + + @Override // ClientProtocol + public void setOwner(String src, String username, String groupname) + throws IOException { + nn.checkOperation(OperationCategory.WRITE); + namesystem.setOwner(src, username, groupname); + } + + @Override // ClientProtocol + public LocatedBlock addBlock(String src, + String clientName, + ExtendedBlock previous, + DatanodeInfo[] excludedNodes) + throws IOException { + nn.checkOperation(OperationCategory.WRITE); + if(stateChangeLog.isDebugEnabled()) { + stateChangeLog.debug("*BLOCK* NameNode.addBlock: file " + +src+" for 
"+clientName); + } + HashMap excludedNodesSet = null; + if (excludedNodes != null) { + excludedNodesSet = new HashMap(excludedNodes.length); + for (Node node:excludedNodes) { + excludedNodesSet.put(node, node); + } + } + LocatedBlock locatedBlock = + namesystem.getAdditionalBlock(src, clientName, previous, excludedNodesSet); + if (locatedBlock != null) + metrics.incrAddBlockOps(); + return locatedBlock; + } + + @Override // ClientProtocol + public LocatedBlock getAdditionalDatanode(final String src, final ExtendedBlock blk, + final DatanodeInfo[] existings, final DatanodeInfo[] excludes, + final int numAdditionalNodes, final String clientName + ) throws IOException { + nn.checkOperation(OperationCategory.WRITE); + if (LOG.isDebugEnabled()) { + LOG.debug("getAdditionalDatanode: src=" + src + + ", blk=" + blk + + ", existings=" + Arrays.asList(existings) + + ", excludes=" + Arrays.asList(excludes) + + ", numAdditionalNodes=" + numAdditionalNodes + + ", clientName=" + clientName); + } + + metrics.incrGetAdditionalDatanodeOps(); + + HashMap excludeSet = null; + if (excludes != null) { + excludeSet = new HashMap(excludes.length); + for (Node node : excludes) { + excludeSet.put(node, node); + } + } + return namesystem.getAdditionalDatanode(src, blk, + existings, excludeSet, numAdditionalNodes, clientName); + } + /** + * The client needs to give up on the block. + */ + @Override // ClientProtocol + public void abandonBlock(ExtendedBlock b, String src, String holder) + throws IOException { + nn.checkOperation(OperationCategory.WRITE); + if(stateChangeLog.isDebugEnabled()) { + stateChangeLog.debug("*BLOCK* NameNode.abandonBlock: " + +b+" of file "+src); + } + if (!namesystem.abandonBlock(b, src, holder)) { + throw new IOException("Cannot abandon block during write to " + src); + } + } + + @Override // ClientProtocol + public boolean complete(String src, String clientName, ExtendedBlock last) + throws IOException { + nn.checkOperation(OperationCategory.WRITE); + if(stateChangeLog.isDebugEnabled()) { + stateChangeLog.debug("*DIR* NameNode.complete: " + + src + " for " + clientName); + } + return namesystem.completeFile(src, clientName, last); + } + + /** + * The client has detected an error on the specified located blocks + * and is reporting them to the server. For now, the namenode will + * mark the block as corrupt. In the future we might + * check the blocks are actually corrupt. 
+ */ + @Override // ClientProtocol, DatanodeProtocol + public void reportBadBlocks(LocatedBlock[] blocks) throws IOException { + nn.checkOperation(OperationCategory.WRITE); + stateChangeLog.info("*DIR* NameNode.reportBadBlocks"); + for (int i = 0; i < blocks.length; i++) { + ExtendedBlock blk = blocks[i].getBlock(); + DatanodeInfo[] nodes = blocks[i].getLocations(); + for (int j = 0; j < nodes.length; j++) { + DatanodeInfo dn = nodes[j]; + namesystem.getBlockManager().findAndMarkBlockAsCorrupt(blk, dn); + } + } + } + + @Override // ClientProtocol + public LocatedBlock updateBlockForPipeline(ExtendedBlock block, String clientName) + throws IOException { + nn.checkOperation(OperationCategory.WRITE); + return namesystem.updateBlockForPipeline(block, clientName); + } + + + @Override // ClientProtocol + public void updatePipeline(String clientName, ExtendedBlock oldBlock, + ExtendedBlock newBlock, DatanodeID[] newNodes) + throws IOException { + nn.checkOperation(OperationCategory.WRITE); + namesystem.updatePipeline(clientName, oldBlock, newBlock, newNodes); + } + + @Override // DatanodeProtocol + public void commitBlockSynchronization(ExtendedBlock block, + long newgenerationstamp, long newlength, + boolean closeFile, boolean deleteblock, DatanodeID[] newtargets) + throws IOException { + nn.checkOperation(OperationCategory.WRITE); + namesystem.commitBlockSynchronization(block, + newgenerationstamp, newlength, closeFile, deleteblock, newtargets); + } + + @Override // ClientProtocol + public long getPreferredBlockSize(String filename) + throws IOException { + nn.checkOperation(OperationCategory.READ); + return namesystem.getPreferredBlockSize(filename); + } + + @Deprecated + @Override // ClientProtocol + public boolean rename(String src, String dst) throws IOException { + nn.checkOperation(OperationCategory.WRITE); + if(stateChangeLog.isDebugEnabled()) { + stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst); + } + if (!checkPathLength(dst)) { + throw new IOException("rename: Pathname too long. Limit " + + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels."); + } + boolean ret = namesystem.renameTo(src, dst); + if (ret) { + metrics.incrFilesRenamed(); + } + return ret; + } + + @Override // ClientProtocol + public void concat(String trg, String[] src) throws IOException { + nn.checkOperation(OperationCategory.WRITE); + namesystem.concat(trg, src); + } + + @Override // ClientProtocol + public void rename(String src, String dst, Options.Rename... options) + throws IOException { + nn.checkOperation(OperationCategory.WRITE); + if(stateChangeLog.isDebugEnabled()) { + stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst); + } + if (!checkPathLength(dst)) { + throw new IOException("rename: Pathname too long. 
Limit " + + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels."); + } + namesystem.renameTo(src, dst, options); + metrics.incrFilesRenamed(); + } + + @Deprecated + @Override // ClientProtocol + public boolean delete(String src) throws IOException { + nn.checkOperation(OperationCategory.WRITE); + return delete(src, true); + } + + @Override // ClientProtocol + public boolean delete(String src, boolean recursive) throws IOException { + nn.checkOperation(OperationCategory.WRITE); + if (stateChangeLog.isDebugEnabled()) { + stateChangeLog.debug("*DIR* Namenode.delete: src=" + src + + ", recursive=" + recursive); + } + boolean ret = namesystem.delete(src, recursive); + if (ret) + metrics.incrDeleteFileOps(); + return ret; + } + + /** + * Check path length does not exceed maximum. Returns true if + * length and depth are okay. Returns false if length is too long + * or depth is too great. + */ + private boolean checkPathLength(String src) { + Path srcPath = new Path(src); + return (src.length() <= MAX_PATH_LENGTH && + srcPath.depth() <= MAX_PATH_DEPTH); + } + + @Override // ClientProtocol + public boolean mkdirs(String src, FsPermission masked, boolean createParent) + throws IOException { + nn.checkOperation(OperationCategory.WRITE); + if(stateChangeLog.isDebugEnabled()) { + stateChangeLog.debug("*DIR* NameNode.mkdirs: " + src); + } + if (!checkPathLength(src)) { + throw new IOException("mkdirs: Pathname too long. Limit " + + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels."); + } + return namesystem.mkdirs(src, + new PermissionStatus(UserGroupInformation.getCurrentUser().getShortUserName(), + null, masked), createParent); + } + + @Override // ClientProtocol + public void renewLease(String clientName) throws IOException { + nn.checkOperation(OperationCategory.WRITE); + namesystem.renewLease(clientName); + } + + @Override // ClientProtocol + public DirectoryListing getListing(String src, byte[] startAfter, + boolean needLocation) throws IOException { + nn.checkOperation(OperationCategory.READ); + DirectoryListing files = namesystem.getListing( + src, startAfter, needLocation); + if (files != null) { + metrics.incrGetListingOps(); + metrics.incrFilesInGetListingOps(files.getPartialListing().length); + } + return files; + } + + @Override // ClientProtocol + public HdfsFileStatus getFileInfo(String src) throws IOException { + nn.checkOperation(OperationCategory.READ); + metrics.incrFileInfoOps(); + return namesystem.getFileInfo(src, true); + } + + @Override // ClientProtocol + public HdfsFileStatus getFileLinkInfo(String src) throws IOException { + nn.checkOperation(OperationCategory.READ); + metrics.incrFileInfoOps(); + return namesystem.getFileInfo(src, false); + } + + @Override + public long[] getStats() { + return namesystem.getStats(); + } + + @Override // ClientProtocol + public DatanodeInfo[] getDatanodeReport(DatanodeReportType type) + throws IOException { + nn.checkOperation(OperationCategory.READ); + DatanodeInfo results[] = namesystem.datanodeReport(type); + if (results == null ) { + throw new IOException("Cannot find datanode report"); + } + return results; + } + + @Override // ClientProtocol + public boolean setSafeMode(SafeModeAction action) throws IOException { + // TODO:HA decide on OperationCategory for this + return namesystem.setSafeMode(action); + } + @Override // ClientProtocol + public boolean restoreFailedStorage(String arg) + throws AccessControlException { + // TODO:HA decide on OperationCategory for this + return namesystem.restoreFailedStorage(arg); 
+ } + + @Override // ClientProtocol + public void saveNamespace() throws IOException { + // TODO:HA decide on OperationCategory for this + namesystem.saveNamespace(); + } + + @Override // ClientProtocol + public void refreshNodes() throws IOException { + // TODO:HA decide on OperationCategory for this + namesystem.getBlockManager().getDatanodeManager().refreshNodes( + new HdfsConfiguration()); + } + + @Override // NamenodeProtocol + public long getTransactionID() { + // TODO:HA decide on OperationCategory for this + return namesystem.getEditLog().getSyncTxId(); + } + + @Override // NamenodeProtocol + public CheckpointSignature rollEditLog() throws IOException { + // TODO:HA decide on OperationCategory for this + return namesystem.rollEditLog(); + } + + @Override // NamenodeProtocol + public RemoteEditLogManifest getEditLogManifest(long sinceTxId) + throws IOException { + // TODO:HA decide on OperationCategory for this + return namesystem.getEditLog().getEditLogManifest(sinceTxId); + } + + @Override // ClientProtocol + public void finalizeUpgrade() throws IOException { + // TODO:HA decide on OperationCategory for this + namesystem.finalizeUpgrade(); + } + + @Override // ClientProtocol + public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action) + throws IOException { + // TODO:HA decide on OperationCategory for this + return namesystem.distributedUpgradeProgress(action); + } + + @Override // ClientProtocol + public void metaSave(String filename) throws IOException { + // TODO:HA decide on OperationCategory for this + namesystem.metaSave(filename); + } + @Override // ClientProtocol + public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie) + throws IOException { + nn.checkOperation(OperationCategory.READ); + Collection<FSNamesystem.CorruptFileBlockInfo> fbs = + namesystem.listCorruptFileBlocks(path, cookie); + + String[] files = new String[fbs.size()]; + String lastCookie = ""; + int i = 0; + for(FSNamesystem.CorruptFileBlockInfo fb: fbs) { + files[i++] = fb.path; + lastCookie = fb.block.getBlockName(); + } + return new CorruptFileBlocks(files, lastCookie); + } + + /** + * Tell all datanodes to use a new, non-persistent bandwidth value for + * dfs.datanode.balance.bandwidthPerSec. + * @param bandwidth Balancer bandwidth in bytes per second for all datanodes. 
+ * @throws IOException + */ + @Override // ClientProtocol + public void setBalancerBandwidth(long bandwidth) throws IOException { + // TODO:HA decide on OperationCategory for this + namesystem.getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth); + } + + @Override // ClientProtocol + public ContentSummary getContentSummary(String path) throws IOException { + nn.checkOperation(OperationCategory.READ); + return namesystem.getContentSummary(path); + } + + @Override // ClientProtocol + public void setQuota(String path, long namespaceQuota, long diskspaceQuota) + throws IOException { + nn.checkOperation(OperationCategory.WRITE); + namesystem.setQuota(path, namespaceQuota, diskspaceQuota); + } + + @Override // ClientProtocol + public void fsync(String src, String clientName) throws IOException { + nn.checkOperation(OperationCategory.WRITE); + namesystem.fsync(src, clientName); + } + + @Override // ClientProtocol + public void setTimes(String src, long mtime, long atime) + throws IOException { + nn.checkOperation(OperationCategory.WRITE); + namesystem.setTimes(src, mtime, atime); + } + + @Override // ClientProtocol + public void createSymlink(String target, String link, FsPermission dirPerms, + boolean createParent) throws IOException { + nn.checkOperation(OperationCategory.WRITE); + metrics.incrCreateSymlinkOps(); + /* We enforce the MAX_PATH_LENGTH limit even though a symlink target + * URI may refer to a non-HDFS file system. + */ + if (!checkPathLength(link)) { + throw new IOException("Symlink path exceeds " + MAX_PATH_LENGTH + + " character limit"); + + } + if ("".equals(target)) { + throw new IOException("Invalid symlink target"); + } + final UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + namesystem.createSymlink(target, link, + new PermissionStatus(ugi.getShortUserName(), null, dirPerms), createParent); + } + + @Override // ClientProtocol + public String getLinkTarget(String path) throws IOException { + nn.checkOperation(OperationCategory.READ); + metrics.incrGetLinkTargetOps(); + /* Resolves the first symlink in the given path, returning a + * new path consisting of the target of the symlink and any + * remaining path components from the original path. 
+ */ + try { + HdfsFileStatus stat = namesystem.getFileInfo(path, false); + if (stat != null) { + // NB: getSymlink throws IOException if !stat.isSymlink() + return stat.getSymlink(); + } + } catch (UnresolvedPathException e) { + return e.getResolvedPath().toString(); + } catch (UnresolvedLinkException e) { + // The NameNode should only throw an UnresolvedPathException + throw new AssertionError("UnresolvedLinkException thrown"); + } + return null; + } + + + @Override // DatanodeProtocol + public DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg) + throws IOException { + verifyVersion(nodeReg.getVersion()); + namesystem.registerDatanode(nodeReg); + + return nodeReg; + } + + @Override // DatanodeProtocol + public DatanodeCommand[] sendHeartbeat(DatanodeRegistration nodeReg, + long capacity, long dfsUsed, long remaining, long blockPoolUsed, + int xmitsInProgress, int xceiverCount, int failedVolumes) + throws IOException { + verifyRequest(nodeReg); + return namesystem.handleHeartbeat(nodeReg, capacity, dfsUsed, remaining, + blockPoolUsed, xceiverCount, xmitsInProgress, failedVolumes); + } + + @Override // DatanodeProtocol + public DatanodeCommand blockReport(DatanodeRegistration nodeReg, + String poolId, long[] blocks) throws IOException { + verifyRequest(nodeReg); + BlockListAsLongs blist = new BlockListAsLongs(blocks); + if(stateChangeLog.isDebugEnabled()) { + stateChangeLog.debug("*BLOCK* NameNode.blockReport: " + + "from " + nodeReg.getName() + " " + blist.getNumberOfBlocks() + + " blocks"); + } + + namesystem.getBlockManager().processReport(nodeReg, poolId, blist); + if (nn.getFSImage().isUpgradeFinalized()) + return new DatanodeCommand.Finalize(poolId); + return null; + } + + @Override // DatanodeProtocol + public void blockReceivedAndDeleted(DatanodeRegistration nodeReg, String poolId, + ReceivedDeletedBlockInfo[] receivedAndDeletedBlocks) throws IOException { + verifyRequest(nodeReg); + if(stateChangeLog.isDebugEnabled()) { + stateChangeLog.debug("*BLOCK* NameNode.blockReceivedAndDeleted: " + +"from "+nodeReg.getName()+" "+receivedAndDeletedBlocks.length + +" blocks."); + } + namesystem.getBlockManager().blockReceivedAndDeleted( + nodeReg, poolId, receivedAndDeletedBlocks); + } + + @Override // DatanodeProtocol + public void errorReport(DatanodeRegistration nodeReg, + int errorCode, String msg) throws IOException { + String dnName = (nodeReg == null ? "unknown DataNode" : nodeReg.getName()); + + if (errorCode == DatanodeProtocol.NOTIFY) { + LOG.info("Error report from " + dnName + ": " + msg); + return; + } + verifyRequest(nodeReg); + + if (errorCode == DatanodeProtocol.DISK_ERROR) { + LOG.warn("Disk error on " + dnName + ": " + msg); + } else if (errorCode == DatanodeProtocol.FATAL_DISK_ERROR) { + LOG.warn("Fatal disk error on " + dnName + ": " + msg); + namesystem.getBlockManager().getDatanodeManager().removeDatanode(nodeReg); + } else { + LOG.info("Error report from " + dnName + ": " + msg); + } + } + + @Override // DatanodeProtocol, NamenodeProtocol + public NamespaceInfo versionRequest() throws IOException { + return namesystem.getNamespaceInfo(); + } + + @Override // DatanodeProtocol + public UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOException { + return namesystem.processDistributedUpgradeCommand(comm); + } + + /** + * Verify request. + * + * Verifies correctness of the datanode version, registration ID, and + * if the datanode does not need to be shutdown. 
+ * + * @param nodeReg data node registration + * @throws IOException + */ + void verifyRequest(NodeRegistration nodeReg) throws IOException { + verifyVersion(nodeReg.getVersion()); + if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID())) { + LOG.warn("Invalid registrationID - expected: " + + namesystem.getRegistrationID() + " received: " + + nodeReg.getRegistrationID()); + throw new UnregisteredNodeException(nodeReg); + } + } + + @Override // RefreshAuthorizationPolicyProtocol + public void refreshServiceAcl() throws IOException { + if (!serviceAuthEnabled) { + throw new AuthorizationException("Service Level Authorization not enabled!"); + } + + this.server.refreshServiceAcl(new Configuration(), new HDFSPolicyProvider()); + if (this.serviceRpcServer != null) { + this.serviceRpcServer.refreshServiceAcl(new Configuration(), new HDFSPolicyProvider()); + } + } + + @Override // RefreshAuthorizationPolicyProtocol + public void refreshUserToGroupsMappings() throws IOException { + LOG.info("Refreshing all user-to-groups mappings. Requested by user: " + + UserGroupInformation.getCurrentUser().getShortUserName()); + Groups.getUserToGroupsMappingService().refresh(); + } + + @Override // RefreshAuthorizationPolicyProtocol + public void refreshSuperUserGroupsConfiguration() { + LOG.info("Refreshing SuperUser proxy group mapping list "); + + ProxyUsers.refreshSuperUserGroupsConfiguration(); + } + + @Override // GetUserMappingsProtocol + public String[] getGroupsForUser(String user) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("Getting groups for user " + user); + } + return UserGroupInformation.createRemoteUser(user).getGroupNames(); + } + + @Override // HAServiceProtocol + public synchronized void monitorHealth() throws HealthCheckFailedException { + nn.monitorHealth(); + } + + @Override // HAServiceProtocol + public synchronized void transitionToActive() throws ServiceFailedException { + nn.transitionToActive(); + } + + @Override // HAServiceProtocol + public synchronized void transitionToStandby() throws ServiceFailedException { + nn.transitionToStandby(); + } + + /** + * Verify version. 
+ * + * @param version + * @throws IOException + */ + void verifyVersion(int version) throws IOException { + if (version != HdfsConstants.LAYOUT_VERSION) + throw new IncorrectVersionException(version, "data node"); + } + + private static String getClientMachine() { + String clientMachine = Server.getRemoteAddress(); + if (clientMachine == null) { + clientMachine = ""; + } + return clientMachine; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java index 26376d476f..358d778eaf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java @@ -48,7 +48,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.NodeBase; @@ -173,7 +173,7 @@ public void fsck() { out.println(msg); namenode.getNamesystem().logFsckEvent(path, remoteAddress); - final HdfsFileStatus file = namenode.getFileInfo(path); + final HdfsFileStatus file = namenode.getRpcServer().getFileInfo(path); if (file != null) { if (showCorruptFileBlocks) { @@ -250,7 +250,8 @@ private void check(String parent, HdfsFileStatus file, Result res) throws IOExce res.totalDirs++; do { assert lastReturnedName != null; - thisListing = namenode.getListing(path, lastReturnedName, false); + thisListing = namenode.getRpcServer().getListing( + path, lastReturnedName, false); if (thisListing == null) { return; } @@ -385,7 +386,7 @@ private void check(String parent, HdfsFileStatus file, Result res) throws IOExce break; case FIXING_DELETE: if (!isOpen) - namenode.delete(path, true); + namenode.getRpcServer().delete(path, true); } } if (showFiles) { @@ -414,7 +415,8 @@ private void lostFoundMove(String parent, HdfsFileStatus file, LocatedBlocks blo String target = lostFound + fullName; String errmsg = "Failed to move " + fullName + " to /lost+found"; try { - if (!namenode.mkdirs(target, file.getPermission(), true)) { + if (!namenode.getRpcServer().mkdirs( + target, file.getPermission(), true)) { LOG.warn(errmsg); return; } @@ -502,8 +504,8 @@ private void copyBlock(DFSClient dfs, LocatedBlock lblock, } try { s = new Socket(); - s.connect(targetAddr, HdfsConstants.READ_TIMEOUT); - s.setSoTimeout(HdfsConstants.READ_TIMEOUT); + s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT); + s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT); String file = BlockReaderFactory.getFileName(targetAddr, block.getBlockPoolId(), block.getBlockId()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java index f50e1f8b9f..3d2fd8b0be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java @@ -40,7 +40,7 @@ import 
org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; -import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; @@ -49,6 +49,7 @@ import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.io.Text; import org.apache.hadoop.net.NodeBase; import org.apache.hadoop.security.UserGroupInformation; @@ -354,7 +355,7 @@ void generateHealthReport(JspWriter out, NameNode nn, } } - static String getDelegationToken(final NameNode nn, + static String getDelegationToken(final NamenodeProtocols nn, HttpServletRequest request, Configuration conf, final UserGroupInformation ugi) throws IOException, InterruptedException { Token token = ugi @@ -381,7 +382,8 @@ static void redirectToRandomDataNode(ServletContext context, .getAttribute(JspHelper.CURRENT_CONF); final DatanodeID datanode = getRandomDatanode(nn); UserGroupInformation ugi = JspHelper.getUGI(context, request, conf); - String tokenString = getDelegationToken(nn, request, conf, ugi); + String tokenString = getDelegationToken( + nn.getRpcServer(), request, conf, ugi); // if the user is defined, get a delegation token and stringify it final String redirectLocation; final String nodeToRedirect; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java index 5e2041cd38..ddd0acbbfb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java @@ -70,7 +70,7 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res try { long result = ugi.doAs(new PrivilegedExceptionAction() { public Long run() throws Exception { - return nn.renewDelegationToken(token); + return nn.getRpcServer().renewDelegationToken(token); } }); PrintStream os = new PrintStream(resp.getOutputStream()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java index f126f17eeb..9c5ef6f2c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java @@ -45,8 +45,8 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import 
org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; @@ -122,6 +122,8 @@ public class SecondaryNameNode implements Runnable { /** checkpoint once every this many transactions, regardless of time */ private long checkpointTxnCount; + private FSNamesystem namesystem; + /** {@inheritDoc} */ public String toString() { @@ -220,6 +222,8 @@ private void initialize(final Configuration conf, "/tmp/hadoop/dfs/namesecondary"); checkpointImage = new CheckpointStorage(conf, checkpointDirs, checkpointEditsDirs); checkpointImage.recoverCreate(commandLineOpts.shouldFormat()); + + namesystem = new FSNamesystem(conf, checkpointImage); // Initialize other scheduling parameters from the configuration checkpointCheckPeriod = conf.getLong( @@ -456,7 +460,7 @@ InetSocketAddress getNameNodeAddress() { */ private String getInfoServer() throws IOException { URI fsName = FileSystem.getDefaultUri(conf); - if (!FSConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) { + if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) { throw new IOException("This is not a DFS"); } @@ -520,7 +524,7 @@ boolean doCheckpoint() throws IOException { boolean loadImage = downloadCheckpointFiles( fsName, checkpointImage, sig, manifest); // Fetch fsimage and edits - doMerge(sig, manifest, loadImage, checkpointImage); + doMerge(sig, manifest, loadImage, checkpointImage, namesystem); // // Upload the new image into the NameNode. Then tell the Namenode @@ -750,8 +754,7 @@ static class CheckpointStorage extends FSImage { CheckpointStorage(Configuration conf, Collection imageDirs, Collection editsDirs) throws IOException { - super(conf, (FSNamesystem)null, imageDirs, editsDirs); - setFSNamesystem(new FSNamesystem(this, conf)); + super(conf, imageDirs, editsDirs); // the 2NN never writes edits -- it only downloads them. So // we shouldn't have any editLog instance. 
Setting to null @@ -793,7 +796,7 @@ void recoverCreate(boolean format) throws IOException { StorageState curState; try { - curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR, storage); + curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage); // sd is locked but not opened switch(curState) { case NON_EXISTENT: @@ -837,7 +840,8 @@ void ensureCurrentDirExists() throws IOException { static void doMerge( CheckpointSignature sig, RemoteEditLogManifest manifest, - boolean loadImage, FSImage dstImage) throws IOException { + boolean loadImage, FSImage dstImage, FSNamesystem dstNamesystem) + throws IOException { NNStorage dstStorage = dstImage.getStorage(); dstStorage.setStorageInfo(sig); @@ -848,11 +852,11 @@ static void doMerge( sig.mostRecentCheckpointTxId + " even though it should have " + "just been downloaded"); } - dstImage.reloadFromImageFile(file); + dstImage.reloadFromImageFile(file, dstNamesystem); } - Checkpointer.rollForwardByApplyingLogs(manifest, dstImage); - dstImage.saveFSImageInAllDirs(dstImage.getLastAppliedTxId()); + Checkpointer.rollForwardByApplyingLogs(manifest, dstImage, dstNamesystem); + dstImage.saveFSImageInAllDirs(dstNamesystem, dstImage.getLastAppliedTxId()); dstStorage.writeAll(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java index 944e998ecf..cc8dccaf1a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java @@ -27,7 +27,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.util.DataTransferThrottler; @@ -124,7 +124,7 @@ static void uploadImageFromStorage(String fsName, static void getFileServer(OutputStream outstream, File localfile, DataTransferThrottler throttler) throws IOException { - byte buf[] = new byte[FSConstants.IO_FILE_BUFFER_SIZE]; + byte buf[] = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE]; FileInputStream infile = null; try { infile = new FileInputStream(localfile); @@ -139,7 +139,7 @@ static void getFileServer(OutputStream outstream, File localfile, && localfile.getAbsolutePath().contains("fsimage")) { // Test sending image shorter than localfile long len = localfile.length(); - buf = new byte[(int)Math.min(len/2, FSConstants.IO_FILE_BUFFER_SIZE)]; + buf = new byte[(int)Math.min(len/2, HdfsConstants.IO_FILE_BUFFER_SIZE)]; // This will read at most half of the image // and the rest of the image will be sent over the wire infile.read(buf); @@ -179,7 +179,7 @@ static void getFileServer(OutputStream outstream, File localfile, static MD5Hash getFileClient(String nnHostPort, String queryString, List localPaths, NNStorage dstStorage, boolean getChecksum) throws IOException { - byte[] buf = new byte[FSConstants.IO_FILE_BUFFER_SIZE]; + byte[] buf = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE]; String proto = UserGroupInformation.isSecurityEnabled() ? 
"https://" : "http://"; StringBuilder str = new StringBuilder(proto+nnHostPort+"/getimage?"); str.append(queryString); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java index b4e89e3fa1..a46efae8a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java @@ -19,9 +19,9 @@ import java.io.IOException; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; import org.apache.hadoop.hdfs.server.common.UpgradeManager; import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; @@ -38,8 +38,8 @@ * and updates its status. */ class UpgradeManagerNamenode extends UpgradeManager { - public HdfsConstants.NodeType getType() { - return HdfsConstants.NodeType.NAME_NODE; + public HdfsServerConstants.NodeType getType() { + return HdfsServerConstants.NodeType.NAME_NODE; } private final FSNamesystem namesystem; @@ -66,7 +66,7 @@ public synchronized boolean startUpgrade() throws IOException { this.broadcastCommand = currentUpgrades.first().startUpgrade(); NameNode.LOG.info("\n Distributed upgrade for NameNode version " + getUpgradeVersion() + " to current LV " - + FSConstants.LAYOUT_VERSION + " is started."); + + HdfsConstants.LAYOUT_VERSION + " is started."); return true; } @@ -75,7 +75,7 @@ synchronized UpgradeCommand processUpgradeCommand(UpgradeCommand command if(NameNode.LOG.isDebugEnabled()) { NameNode.LOG.debug("\n Distributed upgrade for NameNode version " + getUpgradeVersion() + " to current LV " - + FSConstants.LAYOUT_VERSION + " is processing upgrade command: " + + HdfsConstants.LAYOUT_VERSION + " is processing upgrade command: " + command.getAction() + " status = " + getUpgradeStatus() + "%"); } if(currentUpgrades == null) { @@ -96,7 +96,7 @@ synchronized UpgradeCommand processUpgradeCommand(UpgradeCommand command curUO.completeUpgrade(); NameNode.LOG.info("\n Distributed upgrade for NameNode version " + curUO.getVersion() + " to current LV " - + FSConstants.LAYOUT_VERSION + " is complete."); + + HdfsConstants.LAYOUT_VERSION + " is complete."); // proceede with the next one currentUpgrades.remove(curUO); if(currentUpgrades.isEmpty()) { // all upgrades are done @@ -110,7 +110,7 @@ synchronized UpgradeCommand processUpgradeCommand(UpgradeCommand command public synchronized void completeUpgrade() throws IOException { // set and write new upgrade state into disk - setUpgradeState(false, FSConstants.LAYOUT_VERSION); + setUpgradeState(false, HdfsConstants.LAYOUT_VERSION); namesystem.getFSImage().getStorage().writeAll(); currentUpgrades = null; broadcastCommand = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java index 0872eb22c0..5a75554544 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java @@ -20,7 +20,7 @@ import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.UpgradeObject; import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; @@ -44,8 +44,8 @@ public abstract class UpgradeObjectNamenode extends UpgradeObject { public abstract UpgradeCommand processUpgradeCommand(UpgradeCommand command ) throws IOException; - public HdfsConstants.NodeType getType() { - return HdfsConstants.NodeType.NAME_NODE; + public HdfsServerConstants.NodeType getType() { + return HdfsServerConstants.NodeType.NAME_NODE; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java index 2ee1866617..a75701ef86 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode.metrics; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.annotation.Metric; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java index 80426605a0..aa98ab19b6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java @@ -30,7 +30,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; /** * Information sent by a subordinate name-node to the active name-node diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java index e847cfc371..cc33a04d1e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java @@ -24,7 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import 
org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.NNStorage; @@ -53,7 +53,7 @@ public NamespaceInfo() { public NamespaceInfo(int nsID, String clusterID, String bpID, long cT, int duVersion) { - super(FSConstants.LAYOUT_VERSION, nsID, clusterID, cT); + super(HdfsConstants.LAYOUT_VERSION, nsID, clusterID, cT); blockPoolID = bpID; buildVersion = Storage.getBuildVersion(); this.distributedUpgradeVersion = duVersion; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java index 5b8ac59f37..c82494d5ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java @@ -22,15 +22,15 @@ import java.io.IOException; import java.util.Comparator; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.io.Writable; import com.google.common.base.Function; import com.google.common.collect.ComparisonChain; public class RemoteEditLog implements Writable, Comparable { - private long startTxId = FSConstants.INVALID_TXID; - private long endTxId = FSConstants.INVALID_TXID; + private long startTxId = HdfsConstants.INVALID_TXID; + private long endTxId = HdfsConstants.INVALID_TXID; public RemoteEditLog() { } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java index cdf1d791d2..bf9b68b1b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java @@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableFactories; import org.apache.hadoop.io.WritableFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index 91cf9eec58..b4f4e7c4d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -40,9 +40,9 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; -import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction; import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.ipc.RPC; @@ -115,7 +115,7 @@ public String getCommandName() { @Override public void run(Path path) throws IOException { - dfs.setQuota(path, FSConstants.QUOTA_RESET, FSConstants.QUOTA_DONT_SET); + dfs.setQuota(path, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET); } } @@ -161,7 +161,7 @@ public String getCommandName() { @Override public void run(Path path) throws IOException { - dfs.setQuota(path, quota, FSConstants.QUOTA_DONT_SET); + dfs.setQuota(path, quota, HdfsConstants.QUOTA_DONT_SET); } } @@ -200,7 +200,7 @@ public String getCommandName() { @Override public void run(Path path) throws IOException { - dfs.setQuota(path, FSConstants.QUOTA_DONT_SET, FSConstants.QUOTA_RESET); + dfs.setQuota(path, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_RESET); } } @@ -250,7 +250,7 @@ public String getCommandName() { @Override public void run(Path path) throws IOException { - dfs.setQuota(path, FSConstants.QUOTA_DONT_SET, quota); + dfs.setQuota(path, HdfsConstants.QUOTA_DONT_SET, quota); } } @@ -288,7 +288,7 @@ public void report() throws IOException { long used = ds.getUsed(); long remaining = ds.getRemaining(); long presentCapacity = used + remaining; - boolean mode = dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET); + boolean mode = dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET); UpgradeStatusReport status = dfs.distributedUpgradeProgress(UpgradeAction.GET_STATUS); @@ -361,17 +361,17 @@ public void setSafeMode(String[] argv, int idx) throws IOException { printUsage("-safemode"); return; } - FSConstants.SafeModeAction action; + HdfsConstants.SafeModeAction action; Boolean waitExitSafe = false; if ("leave".equalsIgnoreCase(argv[idx])) { - action = FSConstants.SafeModeAction.SAFEMODE_LEAVE; + action = HdfsConstants.SafeModeAction.SAFEMODE_LEAVE; } else if ("enter".equalsIgnoreCase(argv[idx])) { - action = FSConstants.SafeModeAction.SAFEMODE_ENTER; + action = HdfsConstants.SafeModeAction.SAFEMODE_ENTER; } else if ("get".equalsIgnoreCase(argv[idx])) { - action = FSConstants.SafeModeAction.SAFEMODE_GET; + action = HdfsConstants.SafeModeAction.SAFEMODE_GET; } else if ("wait".equalsIgnoreCase(argv[idx])) { - action = FSConstants.SafeModeAction.SAFEMODE_GET; + action = HdfsConstants.SafeModeAction.SAFEMODE_GET; waitExitSafe = true; } else { printUsage("-safemode"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java index c68cef6a1d..617b90026c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import static org.apache.hadoop.fs.FileContextTestHelper.*; import org.apache.hadoop.ipc.RemoteException; import static org.junit.Assert.*; @@ -212,7 +212,7 @@ public void testSetReplication() throws IOException { public void testCreateLinkMaxPathLink() throws IOException { Path dir = new Path(testBaseDir1()); Path file = new 
Path(testBaseDir1(), "file"); - final int maxPathLen = FSConstants.MAX_PATH_LENGTH; + final int maxPathLen = HdfsConstants.MAX_PATH_LENGTH; final int dirLen = dir.toString().length() + 1; int len = maxPathLen - dirLen; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java index a932f881a2..a437fffadd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.security.UserGroupInformation; import org.junit.After; import org.junit.AfterClass; @@ -108,11 +108,11 @@ public void testOldRenameWithQuota() throws Exception { Path dst2 = getTestRootPath(fc, "test/testOldRenameWithQuota/dstdir/dst2"); createFile(src1); createFile(src2); - fs.setQuota(src1.getParent(), FSConstants.QUOTA_DONT_SET, - FSConstants.QUOTA_DONT_SET); + fs.setQuota(src1.getParent(), HdfsConstants.QUOTA_DONT_SET, + HdfsConstants.QUOTA_DONT_SET); fc.mkdir(dst1.getParent(), FileContext.DEFAULT_PERM, true); - fs.setQuota(dst1.getParent(), 2, FSConstants.QUOTA_DONT_SET); + fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET); /* * Test1: src does not exceed quota and dst has no quota check and hence * accommodates rename @@ -130,7 +130,7 @@ public void testOldRenameWithQuota() throws Exception { * Test3: src exceeds quota and dst has *no* quota to accommodate rename */ // src1 has no quota to accommodate new rename node - fs.setQuota(src1.getParent(), 1, FSConstants.QUOTA_DONT_SET); + fs.setQuota(src1.getParent(), 1, HdfsConstants.QUOTA_DONT_SET); oldRename(dst1, src1, false, true); } @@ -143,11 +143,11 @@ public void testRenameWithQuota() throws Exception { Path dst2 = getTestRootPath(fc, "test/testRenameWithQuota/dstdir/dst2"); createFile(src1); createFile(src2); - fs.setQuota(src1.getParent(), FSConstants.QUOTA_DONT_SET, - FSConstants.QUOTA_DONT_SET); + fs.setQuota(src1.getParent(), HdfsConstants.QUOTA_DONT_SET, + HdfsConstants.QUOTA_DONT_SET); fc.mkdir(dst1.getParent(), FileContext.DEFAULT_PERM, true); - fs.setQuota(dst1.getParent(), 2, FSConstants.QUOTA_DONT_SET); + fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET); /* * Test1: src does not exceed quota and dst has no quota check and hence * accommodates rename @@ -170,7 +170,7 @@ public void testRenameWithQuota() throws Exception { * rename to a destination that does not exist */ // src1 has no quota to accommodate new rename node - fs.setQuota(src1.getParent(), 1, FSConstants.QUOTA_DONT_SET); + fs.setQuota(src1.getParent(), 1, HdfsConstants.QUOTA_DONT_SET); rename(dst1, src1, false, false, true, Rename.NONE); /* @@ -179,9 +179,9 @@ public void testRenameWithQuota() throws Exception { * is same as quota needed by src. 
*/ // src1 has no quota to accommodate new rename node - fs.setQuota(src1.getParent(), 100, FSConstants.QUOTA_DONT_SET); + fs.setQuota(src1.getParent(), 100, HdfsConstants.QUOTA_DONT_SET); createFile(src1); - fs.setQuota(src1.getParent(), 1, FSConstants.QUOTA_DONT_SET); + fs.setQuota(src1.getParent(), 1, HdfsConstants.QUOTA_DONT_SET); rename(dst1, src1, true, true, false, Rename.OVERWRITE); } @@ -208,7 +208,7 @@ public void testEditsLogOldRename() throws Exception { createFile(dst1); // Set quota so that dst1 parent cannot allow under it new files/directories - fs.setQuota(dst1.getParent(), 2, FSConstants.QUOTA_DONT_SET); + fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET); // Free up quota for a subsequent rename fs.delete(dst1, true); oldRename(src1, dst1, true, false); @@ -237,7 +237,7 @@ public void testEditsLogRename() throws Exception { createFile(dst1); // Set quota so that dst1 parent cannot allow under it new files/directories - fs.setQuota(dst1.getParent(), 2, FSConstants.QUOTA_DONT_SET); + fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET); // Free up quota for a subsequent rename fs.delete(dst1, true); rename(src1, dst1, true, true, false, Rename.OVERWRITE); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java index 25585cecbb..c61e65b6c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FileSystem; @@ -140,8 +140,8 @@ public BlockReader getBlockReader(LocatedBlock testBlock, int offset, int lenToR DatanodeInfo[] nodes = testBlock.getLocations(); targetAddr = NetUtils.createSocketAddr(nodes[0].getName()); sock = new Socket(); - sock.connect(targetAddr, HdfsConstants.READ_TIMEOUT); - sock.setSoTimeout(HdfsConstants.READ_TIMEOUT); + sock.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT); + sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT); return BlockReaderFactory.newBlockReader( sock, targetAddr.toString()+ ":" + block.getBlockId(), block, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 3d8b6f29f5..c7566d2c62 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -54,7 +54,7 @@ import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; @@ -63,7 +63,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; @@ -670,7 +670,7 @@ public static BlockOpResponseProto transferRbw(final ExtendedBlock b, final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length); final DataOutputStream out = new DataOutputStream(new BufferedOutputStream( NetUtils.getOutputStream(s, writeTimeout), - FSConstants.SMALL_BUFFER_SIZE)); + HdfsConstants.SMALL_BUFFER_SIZE)); final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s)); // send the request diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java index 8e19f45641..f82986f331 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java @@ -27,7 +27,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.hdfs.server.namenode.CreateEditsLog; import org.apache.hadoop.net.DNS; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index 89627b71b9..e51401cfc0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -47,8 +47,8 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; @@ -1025,6 +1025,14 @@ public NameNode getNameNode() { return getNameNode(0); } + /** + * Get an instance of the NameNode's RPC handler. + */ + public NamenodeProtocols getNameNodeRpc() { + checkSingleNameNode(); + return getNameNode(0).getRpcServer(); + } + /** * Gets the NameNode for the index. May be null. 
*/ @@ -1361,7 +1369,15 @@ public boolean isNameNodeUp(int nnIndex) { if (nameNode == null) { return false; } - long[] sizes = nameNode.getStats(); + long[] sizes; + try { + sizes = nameNode.getRpcServer().getStats(); + } catch (IOException ioe) { + // This method above should never throw. + // It only throws IOE since it is exposed via RPC + throw new AssertionError("Unexpected IOE thrown: " + + StringUtils.stringifyException(ioe)); + } boolean isUp = false; synchronized (this) { isUp = ((!nameNode.isInSafeMode() || !waitSafeMode) && sizes[0] != 0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java index d06d9766c3..1613e82ca2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java @@ -23,7 +23,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.*; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; @@ -85,7 +85,7 @@ public void testAbandonBlock() throws IOException { public void testQuotaUpdatedWhenBlockAbandoned() throws IOException { DistributedFileSystem dfs = (DistributedFileSystem)fs; // Setting diskspace quota to 3MB - dfs.setQuota(new Path("/"), FSConstants.QUOTA_DONT_SET, 3 * 1024 * 1024); + dfs.setQuota(new Path("/"), HdfsConstants.QUOTA_DONT_SET, 3 * 1024 * 1024); // Start writing a file with 2 replicas to ensure each datanode has one. // Block Size is 1MB. 
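For orientation, a minimal sketch follows; it is not part of the patch. It illustrates how test code reads after these renames: client-facing constants move from FSConstants to HdfsConstants, the server-side HdfsConstants becomes HdfsServerConstants, and tests reach the NameNode through the new MiniDFSCluster.getNameNodeRpc() accessor (a NamenodeProtocols handle) instead of the NameNode object. The class name, quota value, and printed output below are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

public class RenamedConstantsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      cluster.waitActive();

      // Client-visible constants now live in o.a.h.hdfs.protocol.HdfsConstants:
      // leave the namespace quota alone, cap disk space at 3 MB.
      DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
      dfs.setQuota(new Path("/"), HdfsConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);

      // Tests now talk to the NameNode through its RPC handler rather than
      // calling the NameNode object directly.
      NamenodeProtocols nn = cluster.getNameNodeRpc();
      nn.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
      nn.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
      long[] stats = nn.getStats();            // index 0 is the raw capacity
      System.out.println("capacity=" + stats[0]);
    } finally {
      cluster.shutdown();
    }
  }
}

Server-only constants (READ_TIMEOUT, StartupOption, NodeType, ReplicaState, NAMENODE_LEASE_HOLDER) follow the same pattern but are imported from org.apache.hadoop.hdfs.server.common.HdfsServerConstants, as the hunks below show.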
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java index a65e6a233f..e7988f99bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException; import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.io.IOUtils; @@ -45,7 +46,7 @@ public class TestClientProtocolForPipelineRecovery { try { cluster.waitActive(); FileSystem fileSys = cluster.getFileSystem(); - NameNode namenode = cluster.getNameNode(); + NamenodeProtocols namenode = cluster.getNameNodeRpc(); /* Test writing to finalized replicas */ Path file = new Path("dataprotocol.dat"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java index 1d43ea7e6b..1407fd46a0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java @@ -32,7 +32,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java index 05fa648653..9cc1b2999c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java @@ -25,7 +25,12 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; +import java.net.SocketTimeoutException; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.LongWritable; import java.io.IOException; +import java.net.InetSocketAddress; import java.io.InputStream; import java.io.OutputStream; import java.security.MessageDigest; @@ -44,14 +49,22 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.io.IOUtils; import 
org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.ipc.Client; +import org.apache.hadoop.ipc.ProtocolSignature; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.net.NetUtils; import org.mockito.internal.stubbing.answers.ThrowsException; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -61,9 +74,51 @@ * properly in case of errors. */ public class TestDFSClientRetries extends TestCase { + private static final String ADDRESS = "0.0.0.0"; + final static private int PING_INTERVAL = 1000; + final static private int MIN_SLEEP_TIME = 1000; public static final Log LOG = LogFactory.getLog(TestDFSClientRetries.class.getName()); - + final static private Configuration conf = new HdfsConfiguration(); + + private static class TestServer extends Server { + private boolean sleep; + private Class responseClass; + + public TestServer(int handlerCount, boolean sleep) throws IOException { + this(handlerCount, sleep, LongWritable.class, null); + } + + public TestServer(int handlerCount, boolean sleep, + Class paramClass, + Class responseClass) + throws IOException { + super(ADDRESS, 0, paramClass, handlerCount, conf); + this.sleep = sleep; + this.responseClass = responseClass; + } + + @Override + public Writable call(String protocol, Writable param, long receiveTime) + throws IOException { + if (sleep) { + // sleep a bit + try { + Thread.sleep(PING_INTERVAL + MIN_SLEEP_TIME); + } catch (InterruptedException e) {} + } + if (responseClass != null) { + try { + return responseClass.newInstance(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } else { + return param; // echo param as result + } + } + } + // writes 'len' bytes of data to out. private static void writeData(OutputStream out, int len) throws IOException { byte [] buf = new byte[4096*16]; @@ -80,8 +135,6 @@ private static void writeData(OutputStream out, int len) throws IOException { */ public void testWriteTimeoutAtDataNode() throws IOException, InterruptedException { - Configuration conf = new HdfsConfiguration(); - final int writeTimeout = 100; //milliseconds. // set a very short write timeout for datanode, so that tests runs fast. 
conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, writeTimeout); @@ -136,10 +189,9 @@ public void testNotYetReplicatedErrors() throws IOException { final String exceptionMsg = "Nope, not replicated yet..."; final int maxRetries = 1; // Allow one retry (total of two calls) - Configuration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY, maxRetries); - NameNode mockNN = mock(NameNode.class); + NamenodeProtocols mockNN = mock(NamenodeProtocols.class); Answer answer = new ThrowsException(new IOException()) { int retryCount = 0; @@ -182,7 +234,6 @@ public void testFailuresArePerOperation() throws Exception long fileSize = 4096; Path file = new Path("/testFile"); - Configuration conf = new Configuration(); // Set short retry timeout so this test runs faster conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); @@ -190,8 +241,8 @@ public void testFailuresArePerOperation() throws Exception try { cluster.waitActive(); FileSystem fs = cluster.getFileSystem(); - NameNode preSpyNN = cluster.getNameNode(); - NameNode spyNN = spy(preSpyNN); + NamenodeProtocols preSpyNN = cluster.getNameNodeRpc(); + NamenodeProtocols spyNN = spy(preSpyNN); DFSClient client = new DFSClient(null, spyNN, conf, null); int maxBlockAcquires = client.getMaxBlockAcquireFailures(); assertTrue(maxBlockAcquires > 0); @@ -255,11 +306,11 @@ public void testFailuresArePerOperation() throws Exception */ private static class FailNTimesAnswer implements Answer { private int failuresLeft; - private NameNode realNN; + private NamenodeProtocols realNN; - public FailNTimesAnswer(NameNode realNN, int timesToFail) { + public FailNTimesAnswer(NamenodeProtocols preSpyNN, int timesToFail) { failuresLeft = timesToFail; - this.realNN = realNN; + this.realNN = preSpyNN; } public LocatedBlocks answer(InvocationOnMock invocation) throws IOException { @@ -379,7 +430,6 @@ private boolean busyTest(int xcievers, int threads, int fileLen, int timeWin, in long blockSize = 128*1024*1024; // DFS block size int bufferSize = 4096; - Configuration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY, xcievers); conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, retries); @@ -540,7 +590,6 @@ public void testGetFileChecksum() throws Exception { final String f = "/testGetFileChecksum"; final Path p = new Path(f); - final Configuration conf = new Configuration(); final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); try { cluster.waitActive(); @@ -555,7 +604,8 @@ public void testGetFileChecksum() throws Exception { //stop the first datanode final List locatedblocks = DFSClient.callGetBlockLocations( - cluster.getNameNode(), f, 0, Long.MAX_VALUE).getLocatedBlocks(); + cluster.getNameNodeRpc(), f, 0, Long.MAX_VALUE) + .getLocatedBlocks(); final DatanodeInfo first = locatedblocks.get(0).getLocations()[0]; cluster.stopDataNode(first.getName()); @@ -566,5 +616,39 @@ public void testGetFileChecksum() throws Exception { cluster.shutdown(); } } + + /** Test that timeout occurs when DN does not respond to RPC. + * Start up a server and ask it to sleep for n seconds. 
Make an + * RPC to the server and set rpcTimeout to less than n and ensure + * that socketTimeoutException is obtained + */ + public void testClientDNProtocolTimeout() throws IOException { + final Server server = new TestServer(1, true); + server.start(); + + final InetSocketAddress addr = NetUtils.getConnectAddress(server); + DatanodeID fakeDnId = new DatanodeID( + "localhost:" + addr.getPort(), "fake-storage", 0, addr.getPort()); + + ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L)); + LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]); + + ClientDatanodeProtocol proxy = null; + + try { + proxy = DFSUtil.createClientDatanodeProtocolProxy( + fakeDnId, conf, 500, fakeBlock); + + proxy.getReplicaVisibleLength(null); + fail ("Did not get expected exception: SocketTimeoutException"); + } catch (SocketTimeoutException e) { + LOG.info("Got the expected Exception: SocketTimeoutException"); + } finally { + if (proxy != null) { + RPC.stopProxy(proxy); + } + server.stop(); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java index 5a778049d5..7a93226e29 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java @@ -25,8 +25,8 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import com.google.common.collect.Lists; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java index 511e9c1b92..95bf47f97c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs; -import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE; -import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; import java.io.File; import java.io.IOException; @@ -32,8 +32,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.server.common.StorageInfo; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.util.StringUtils; @@ -248,7 +248,7 @@ public void testRollback() throws Exception { baseDirs = 
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous"); deleteMatchingFiles(baseDirs, "edits.*"); startNameNodeShouldFail(StartupOption.ROLLBACK, - "but there are no logs to load"); + "No non-corrupt logs for txid "); UpgradeUtilities.createEmptyDirs(nameNodeDirs); log("NameNode rollback with no image file", numDirs); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java index 0455366e80..f0c20a1ca9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs; -import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE; -import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; import java.io.File; @@ -27,11 +27,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; /** * This test ensures the appropriate response (successful or failure) from @@ -198,7 +198,7 @@ boolean isVersionCompatible(StorageData namenodeSd, StorageData datanodeSd) { return false; } // check #3 - int softwareLV = FSConstants.LAYOUT_VERSION; // will also be Namenode's LV + int softwareLV = HdfsConstants.LAYOUT_VERSION; // will also be Namenode's LV int storedLV = datanodeVer.getLayoutVersion(); if (softwareLV == storedLV && datanodeVer.getCTime() == namenodeVer.getCTime()) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java index 020bdcfd0d..ebfe785fa0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java @@ -24,11 +24,11 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.common.Storage; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; -import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE; -import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; +import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; /** * This test ensures the appropriate response (successful or failure) from diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java index 58d3f3386f..251f23dee7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs; -import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE; -import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; import java.io.File; import java.io.IOException; @@ -29,7 +29,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.TestParallelImageWrite; import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName; import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java index ec33f769ce..6ad08cd2aa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java @@ -30,8 +30,8 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; @@ -290,7 +290,7 @@ private void upgradeAndVerify() throws IOException { DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem(); DFSClient dfsClient = dfs.dfs; //Safemode will be off only after upgrade is complete. Wait for it. 
- while ( dfsClient.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET) ) { + while ( dfsClient.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET) ) { LOG.info("Waiting for SafeMode to be OFF."); try { Thread.sleep(1000); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java index 24c3bc48b1..72faa319b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; import org.apache.hadoop.hdfs.protocol.datatransfer.Op; @@ -52,7 +52,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.NetUtils; @@ -97,8 +97,8 @@ private void sendRecvData(String testDescription, StringUtils.byteToHexString(sendBuf.toByteArray())); sock = new Socket(); - sock.connect(dnAddr, HdfsConstants.READ_TIMEOUT); - sock.setSoTimeout(HdfsConstants.READ_TIMEOUT); + sock.connect(dnAddr, HdfsServerConstants.READ_TIMEOUT); + sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT); OutputStream out = sock.getOutputStream(); // Should we excuse diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java index a0da70c4ef..b6ecb0e4d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java @@ -32,7 +32,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IOUtils; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java index 58bd57a68a..62565170bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java @@ -28,7 +28,7 @@ import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.junit.AfterClass; import org.junit.BeforeClass; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java index 5a542a263e..2cde7ed476 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java @@ -19,7 +19,7 @@ import java.net.InetSocketAddress; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.DFSClient; import junit.framework.TestCase; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java index 61d1adc6f1..06194b8e43 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java @@ -24,7 +24,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.datanode.DataNode; import static org.apache.hadoop.test.MetricsAsserts.*; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java index 2a8383d31b..3069727a48 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java @@ -35,7 +35,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; @@ -293,10 +293,11 @@ static void refreshNodes(final FSNamesystem ns, final Configuration conf } private void verifyStats(NameNode namenode, FSNamesystem fsn, - DatanodeInfo node, boolean decommissioning) throws InterruptedException { + DatanodeInfo node, boolean decommissioning) + throws InterruptedException, IOException { // Do the stats check over 10 iterations for (int i = 0; i < 10; i++) { - long[] newStats = namenode.getStats(); + long[] newStats = namenode.getRpcServer().getStats(); // For decommissioning nodes, ensure capacity of the DN is no longer // counted. 
Only used space of the DN is counted in cluster capacity diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java index 7a013d485d..1ba56d3844 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java @@ -36,14 +36,14 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.LeaseManager; import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.test.GenericTestUtils; import org.apache.log4j.Level; import org.junit.Before; @@ -111,7 +111,7 @@ private void recoverFile(final FileSystem fs) throws Exception { // set the soft limit to be 1 second so that the // namenode triggers lease recovery upon append request - cluster.setLeasePeriod(1000, FSConstants.LEASE_HARDLIMIT_PERIOD); + cluster.setLeasePeriod(1000, HdfsConstants.LEASE_HARDLIMIT_PERIOD); // Trying recovery int tries = 60; @@ -151,8 +151,8 @@ public void testRecoverFinalizedBlock() throws Throwable { try { cluster.waitActive(); - NameNode preSpyNN = cluster.getNameNode(); - NameNode spyNN = spy(preSpyNN); + NamenodeProtocols preSpyNN = cluster.getNameNodeRpc(); + NamenodeProtocols spyNN = spy(preSpyNN); // Delay completeFile GenericTestUtils.DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG); @@ -222,8 +222,8 @@ public void testCompleteOtherLeaseHoldersFile() throws Throwable { try { cluster.waitActive(); - NameNode preSpyNN = cluster.getNameNode(); - NameNode spyNN = spy(preSpyNN); + NamenodeProtocols preSpyNN = cluster.getNameNodeRpc(); + NamenodeProtocols spyNN = spy(preSpyNN); // Delay completeFile GenericTestUtils.DelayAnswer delayer = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java index d2dfd7fc65..642388e42c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.datanode.DataNode; @@ -301,7 +301,7 @@ public void testFileCreationError1() throws IOException { // wait for the datanode to be declared dead while (true) { DatanodeInfo[] info = client.datanodeReport( - FSConstants.DatanodeReportType.LIVE); + HdfsConstants.DatanodeReportType.LIVE); if (info.length 
== 0) { break; } @@ -420,7 +420,7 @@ public void testFileCreationError3() throws IOException { final Path f = new Path("/foo.txt"); createFile(dfs, f, 3); try { - cluster.getNameNode().addBlock(f.toString(), + cluster.getNameNodeRpc().addBlock(f.toString(), client.clientName, null, null); fail(); } catch(IOException ioe) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java index d4d66b4c7f..56cb4506c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java @@ -36,7 +36,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; @@ -91,7 +91,7 @@ private static void writeFile(FileSystem fileSys, Path name, int repl, int fileSize, int blockSize) throws IOException { // Create and write a file that contains three blocks of data FSDataOutputStream stm = fileSys.create(name, true, - FSConstants.IO_FILE_BUFFER_SIZE, (short)repl, (long)blockSize); + HdfsConstants.IO_FILE_BUFFER_SIZE, (short)repl, (long)blockSize); byte[] buffer = new byte[fileSize]; Random rand = new Random(seed); rand.nextBytes(buffer); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java index 995006279c..736b765933 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java @@ -29,7 +29,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.namenode.BackupNode; import org.apache.hadoop.hdfs.server.namenode.NameNode; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java index 3e084e1547..80102582d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.TestInterDatanodeProtocol; @@ -106,7 +106,7 @@ public void testBlockSynchronization() throws Exception { 
DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName); - cluster.getNameNode().append(filestr, dfs.dfs.clientName); + cluster.getNameNodeRpc().append(filestr, dfs.dfs.clientName); // expire lease to trigger block recovery. waitLeaseRecovery(cluster); @@ -129,14 +129,14 @@ public void testBlockSynchronization() throws Exception { filestr = "/foo.safemode"; filepath = new Path(filestr); dfs.create(filepath, (short)1); - cluster.getNameNode().setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER); + cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER); assertTrue(dfs.dfs.exists(filestr)); DFSTestUtil.waitReplication(dfs, filepath, (short)1); waitLeaseRecovery(cluster); // verify that we still cannot recover the lease LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem()); assertTrue("Found " + lm.countLease() + " lease, expected 1", lm.countLease() == 1); - cluster.getNameNode().setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE); + cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE); } finally { if (cluster != null) {cluster.shutdown();} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java index 1d3dd5206a..21d7f2dd81 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java @@ -36,8 +36,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; @@ -316,6 +317,9 @@ public void testSoftLeaseRecovery() throws Exception { u2g_map.put(fakeUsername, new String[] {fakeGroup}); DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map); + // Reset default lease periods + cluster.setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD, + HdfsConstants.LEASE_HARDLIMIT_PERIOD); //create a file // create a random file name String filestr = "/foo" + AppendTestUtil.nextInt(); @@ -428,7 +432,7 @@ public void hardLeaseRecoveryRestartHelper(boolean doRename) cluster.getNameNode(), fileStr); assertFalse("original lease holder should not be the NN", - originalLeaseHolder.equals(HdfsConstants.NAMENODE_LEASE_HOLDER)); + originalLeaseHolder.equals(HdfsServerConstants.NAMENODE_LEASE_HOLDER)); // hflush file AppendTestUtil.LOG.info("hflush"); @@ -455,15 +459,15 @@ public void hardLeaseRecoveryRestartHelper(boolean doRename) cluster.setLeasePeriod(LONG_LEASE_PERIOD, SHORT_LEASE_PERIOD); // Make sure lease recovery begins. 
- Thread.sleep(HdfsConstants.NAMENODE_LEASE_RECHECK_INTERVAL * 2); + Thread.sleep(HdfsServerConstants.NAMENODE_LEASE_RECHECK_INTERVAL * 2); - assertEquals("lease holder should now be the NN", HdfsConstants.NAMENODE_LEASE_HOLDER, + assertEquals("lease holder should now be the NN", HdfsServerConstants.NAMENODE_LEASE_HOLDER, NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), fileStr)); cluster.restartNameNode(false); assertEquals("lease holder should still be the NN after restart", - HdfsConstants.NAMENODE_LEASE_HOLDER, + HdfsServerConstants.NAMENODE_LEASE_HOLDER, NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), fileStr)); // Let the DNs send heartbeats again. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java index 350f4694a4..2e3f6810ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java @@ -84,6 +84,7 @@ private static void writeFile(FileContext fc, Path name, int fileSize) public static void testShutdown() throws Exception { cluster.shutdown(); } + /** Test when input path is a file */ @Test public void testFile() throws IOException { @@ -199,4 +200,4 @@ public void testSymbolicLinks() throws IOException { assertEquals(fc.makeQualified(FILE1), stat.getPath()); assertFalse(itor.hasNext()); } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java index 9f915b4506..abe9036bb1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java @@ -23,7 +23,7 @@ import java.net.*; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java index 3a3dde8304..1dc0b1ebd4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java @@ -24,7 +24,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter; import org.apache.hadoop.hdfs.server.datanode.Replica; @@ -100,7 +100,7 @@ public void pipeline_01() throws IOException { ofs.writeBytes("Some more stuff to write"); ((DFSOutputStream) ofs.getWrappedStream()).hflush(); - List lb = cluster.getNameNode().getBlockLocations( + List lb = cluster.getNameNodeRpc().getBlockLocations( 
filePath.toString(), FILE_SIZE - 1, FILE_SIZE).getLocatedBlocks(); String bpid = cluster.getNamesystem().getBlockPoolId(); @@ -111,7 +111,7 @@ public void pipeline_01() throws IOException { assertTrue("Replica on DN " + dn + " shouldn't be null", r != null); assertEquals("Should be RBW replica on " + dn + " after sequence of calls append()/write()/hflush()", - HdfsConstants.ReplicaState.RBW, r.getState()); + HdfsServerConstants.ReplicaState.RBW, r.getState()); } ofs.close(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java index fbc84f9864..a0727a6c90 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java @@ -25,7 +25,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.tools.DFSAdmin; import org.apache.hadoop.io.IOUtils; @@ -370,14 +370,14 @@ public void testNamespaceCommands() throws Exception { // 2: set the quota of /nqdir0/qdir1 to be 6 final Path quotaDir1 = new Path("/nqdir0/qdir1"); - dfs.setQuota(quotaDir1, 6, FSConstants.QUOTA_DONT_SET); + dfs.setQuota(quotaDir1, 6, HdfsConstants.QUOTA_DONT_SET); ContentSummary c = dfs.getContentSummary(quotaDir1); assertEquals(c.getDirectoryCount(), 3); assertEquals(c.getQuota(), 6); // 3: set the quota of /nqdir0/qdir1/qdir20 to be 7 final Path quotaDir2 = new Path("/nqdir0/qdir1/qdir20"); - dfs.setQuota(quotaDir2, 7, FSConstants.QUOTA_DONT_SET); + dfs.setQuota(quotaDir2, 7, HdfsConstants.QUOTA_DONT_SET); c = dfs.getContentSummary(quotaDir2); assertEquals(c.getDirectoryCount(), 2); assertEquals(c.getQuota(), 7); @@ -385,7 +385,7 @@ public void testNamespaceCommands() throws Exception { // 4: Create directory /nqdir0/qdir1/qdir21 and set its quota to 2 final Path quotaDir3 = new Path("/nqdir0/qdir1/qdir21"); assertTrue(dfs.mkdirs(quotaDir3)); - dfs.setQuota(quotaDir3, 2, FSConstants.QUOTA_DONT_SET); + dfs.setQuota(quotaDir3, 2, HdfsConstants.QUOTA_DONT_SET); c = dfs.getContentSummary(quotaDir3); assertEquals(c.getDirectoryCount(), 1); assertEquals(c.getQuota(), 2); @@ -547,13 +547,13 @@ public void testSpaceCommands() throws Exception { // set the quota of /nqdir0/qdir1 to 4 * fileSpace final Path quotaDir1 = new Path("/nqdir0/qdir1"); - dfs.setQuota(quotaDir1, FSConstants.QUOTA_DONT_SET, 4 * fileSpace); + dfs.setQuota(quotaDir1, HdfsConstants.QUOTA_DONT_SET, 4 * fileSpace); ContentSummary c = dfs.getContentSummary(quotaDir1); assertEquals(c.getSpaceQuota(), 4 * fileSpace); // set the quota of /nqdir0/qdir1/qdir20 to 6 * fileSpace final Path quotaDir20 = new Path("/nqdir0/qdir1/qdir20"); - dfs.setQuota(quotaDir20, FSConstants.QUOTA_DONT_SET, 6 * fileSpace); + dfs.setQuota(quotaDir20, HdfsConstants.QUOTA_DONT_SET, 6 * fileSpace); c = dfs.getContentSummary(quotaDir20); assertEquals(c.getSpaceQuota(), 6 * fileSpace); @@ -561,7 +561,7 @@ public void testSpaceCommands() throws Exception { // Create /nqdir0/qdir1/qdir21 and set its space quota to 2 * fileSpace final Path quotaDir21 = new Path("/nqdir0/qdir1/qdir21"); assertTrue(dfs.mkdirs(quotaDir21)); - dfs.setQuota(quotaDir21, FSConstants.QUOTA_DONT_SET, 2 * 
fileSpace); + dfs.setQuota(quotaDir21, HdfsConstants.QUOTA_DONT_SET, 2 * fileSpace); c = dfs.getContentSummary(quotaDir21); assertEquals(c.getSpaceQuota(), 2 * fileSpace); @@ -661,7 +661,7 @@ public void testSpaceCommands() throws Exception { assertEquals(c.getSpaceConsumed(), 4 * fileSpace); // now increase the quota for quotaDir1 - dfs.setQuota(quotaDir1, FSConstants.QUOTA_DONT_SET, 5 * fileSpace); + dfs.setQuota(quotaDir1, HdfsConstants.QUOTA_DONT_SET, 5 * fileSpace); // Now, appending more than 1 fileLen should result in an error out = dfs.append(file2); hasException = false; @@ -704,8 +704,8 @@ public void testSpaceCommands() throws Exception { assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len); // now increase the quota for quotaDir1 and quotaDir20 - dfs.setQuota(quotaDir1, FSConstants.QUOTA_DONT_SET, 10 * fileSpace); - dfs.setQuota(quotaDir20, FSConstants.QUOTA_DONT_SET, 10 * fileSpace); + dfs.setQuota(quotaDir1, HdfsConstants.QUOTA_DONT_SET, 10 * fileSpace); + dfs.setQuota(quotaDir20, HdfsConstants.QUOTA_DONT_SET, 10 * fileSpace); // then increasing replication should be ok. dfs.setReplication(file2, (short)(replication+1)); @@ -735,7 +735,7 @@ public void testSpaceCommands() throws Exception { int sizeFactorC = 4; // Set space quota for subdirectory C - dfs.setQuota(quotaDir2053_C, FSConstants.QUOTA_DONT_SET, + dfs.setQuota(quotaDir2053_C, HdfsConstants.QUOTA_DONT_SET, (sizeFactorC + 1) * fileSpace); c = dfs.getContentSummary(quotaDir2053_C); assertEquals(c.getSpaceQuota(), (sizeFactorC + 1) * fileSpace); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java index af2339dac3..eef83e4174 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java index 8c98a20528..73adf8efcf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java @@ -24,7 +24,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import static org.junit.Assert.*; import org.junit.Before; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java index e08d9f76ea..b230391dd0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java @@ -23,7 +23,7 @@ 
import java.net.*; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java index 5771b22abf..337fa8a17c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java @@ -35,15 +35,15 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; -import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; -import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE; -import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; @@ -51,7 +51,7 @@ import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage; import org.apache.hadoop.hdfs.server.datanode.DataStorage; import org.apache.hadoop.hdfs.server.namenode.NNStorage; -import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; /** * This class defines a number of static helper methods used by the @@ -121,7 +121,7 @@ public static void initialize() throws Exception { .manageNameDfsDirs(false) .build(); - NameNode namenode = cluster.getNameNode(); + NamenodeProtocols namenode = cluster.getNameNodeRpc(); namenodeStorageNamespaceID = namenode.versionRequest().getNamespaceID(); namenodeStorageFsscTime = namenode.versionRequest().getCTime(); namenodeStorageClusterID = namenode.versionRequest().getClusterID(); @@ -504,7 +504,7 @@ public static void corruptFile(File file) throws IOException { * of the Namenode, whether it is running or not. 
*/ public static int getCurrentLayoutVersion() { - return FSConstants.LAYOUT_VERSION; + return HdfsConstants.LAYOUT_VERSION; } /** @@ -517,7 +517,7 @@ public static int getCurrentLayoutVersion() { */ public static int getCurrentNamespaceID(MiniDFSCluster cluster) throws IOException { if (cluster != null) { - return cluster.getNameNode().versionRequest().getNamespaceID(); + return cluster.getNameNodeRpc().versionRequest().getNamespaceID(); } return namenodeStorageNamespaceID; } @@ -528,7 +528,7 @@ public static int getCurrentNamespaceID(MiniDFSCluster cluster) throws IOExcepti */ public static String getCurrentClusterID(MiniDFSCluster cluster) throws IOException { if (cluster != null) { - return cluster.getNameNode().versionRequest().getClusterID(); + return cluster.getNameNodeRpc().versionRequest().getClusterID(); } return namenodeStorageClusterID; } @@ -539,7 +539,7 @@ public static String getCurrentClusterID(MiniDFSCluster cluster) throws IOExcept */ public static String getCurrentBlockPoolID(MiniDFSCluster cluster) throws IOException { if (cluster != null) { - return cluster.getNameNode().versionRequest().getBlockPoolID(); + return cluster.getNameNodeRpc().versionRequest().getBlockPoolID(); } return namenodeStorageBlockPoolID; } @@ -554,7 +554,7 @@ public static String getCurrentBlockPoolID(MiniDFSCluster cluster) throws IOExce */ public static long getCurrentFsscTime(MiniDFSCluster cluster) throws IOException { if (cluster != null) { - return cluster.getNameNode().versionRequest().getCTime(); + return cluster.getNameNodeRpc().versionRequest().getCTime(); } return namenodeStorageFsscTime; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java index 63dabde88f..9ad87fe087 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java @@ -375,11 +375,11 @@ public void testBlockTokenInLastLocatedBlock() throws IOException, Path filePath = new Path(fileName); FSDataOutputStream out = fs.create(filePath, (short) 1); out.write(new byte[1000]); - LocatedBlocks locatedBlocks = cluster.getNameNode().getBlockLocations( + LocatedBlocks locatedBlocks = cluster.getNameNodeRpc().getBlockLocations( fileName, 0, 1000); while (locatedBlocks.getLastLocatedBlock() == null) { Thread.sleep(100); - locatedBlocks = cluster.getNameNode().getBlockLocations(fileName, 0, + locatedBlocks = cluster.getNameNodeRpc().getBlockLocations(fileName, 0, 1000); } Token token = locatedBlocks.getLastLocatedBlock() diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index 5b0ac31559..34cd784bd0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.hdfs.server.namenode.NameNode; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java index 9ee296c17c..45f41dc7ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java @@ -39,7 +39,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.LeaseManager; @@ -89,7 +89,7 @@ private static class Suite { this.cluster = cluster; clients = new ClientProtocol[nNameNodes]; for(int i = 0; i < nNameNodes; i++) { - clients[i] = cluster.getNameNode(i); + clients[i] = cluster.getNameNode(i).getRpcServer(); } replication = (short)Math.max(1, nDataNodes - 1); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index e8193b56d5..565a765b1f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; @@ -89,8 +89,8 @@ private void addNodes(Iterable nodesToAdd) { for (DatanodeDescriptor dn : nodesToAdd) { cluster.add(dn); dn.updateHeartbeat( - 2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, - 2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0); + 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, + 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java index 25a486b166..d9309edc1d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java @@ -49,8 +49,9 @@ import 
org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; import org.apache.hadoop.hdfs.security.token.block.SecurityTestUtil; import org.apache.hadoop.hdfs.server.balancer.TestBalancer; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.StringUtils; @@ -137,8 +138,8 @@ private static void tryRead(Configuration conf, LocatedBlock lblock, DatanodeInfo[] nodes = lblock.getLocations(); targetAddr = NetUtils.createSocketAddr(nodes[0].getName()); s = new Socket(); - s.connect(targetAddr, HdfsConstants.READ_TIMEOUT); - s.setSoTimeout(HdfsConstants.READ_TIMEOUT); + s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT); + s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT); String file = BlockReaderFactory.getFileName(targetAddr, "test-blockpoolid", block.getBlockId()); @@ -314,6 +315,7 @@ public void testRead() throws Exception { assertEquals(numDataNodes, cluster.getDataNodes().size()); final NameNode nn = cluster.getNameNode(); + final NamenodeProtocols nnProto = nn.getRpcServer(); final BlockManager bm = nn.getNamesystem().getBlockManager(); final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager(); @@ -344,7 +346,7 @@ public void testRead() throws Exception { new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf); - List locatedBlocks = nn.getBlockLocations( + List locatedBlocks = nnProto.getBlockLocations( FILE_TO_READ, 0, FILE_SIZE).getLocatedBlocks(); LocatedBlock lblock = locatedBlocks.get(0); // first block Token myToken = lblock.getBlockToken(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index ee84feca81..794b23c652 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.DatanodeID; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.Node; @@ -76,8 +76,8 @@ public class TestReplicationPolicy extends TestCase { } for(int i=0; i blocksAfterReport = @@ -181,9 +180,10 @@ public void blockReport_02() throws IOException { List blocks2Remove = new ArrayList(); List removedIndex = new ArrayList(); - List lBlocks = cluster.getNameNode().getBlockLocations( - filePath.toString(), FILE_START, - FILE_SIZE).getLocatedBlocks(); + List lBlocks = + cluster.getNameNodeRpc().getBlockLocations( + filePath.toString(), FILE_START, + FILE_SIZE).getLocatedBlocks(); while (removedIndex.size() != 2) { int newRemoveIndex = rand.nextInt(lBlocks.size()); @@ -218,7 +218,7 @@ public void blockReport_02() throws IOException { DataNode dn = cluster.getDataNodes().get(DN_N0); String poolId = 
cluster.getNamesystem().getBlockPoolId(); DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId); - cluster.getNameNode().blockReport(dnR, poolId, + cluster.getNameNodeRpc().blockReport(dnR, poolId, new BlockListAsLongs(blocks, null).getBlockListAsLongs()); BlockManagerTestUtil.getComputedDatanodeWork(cluster.getNamesystem() @@ -258,7 +258,8 @@ public void blockReport_03() throws IOException { DataNode dn = cluster.getDataNodes().get(DN_N0); String poolId = cluster.getNamesystem().getBlockPoolId(); DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId); - DatanodeCommand dnCmd = cluster.getNameNode().blockReport(dnR, poolId, + DatanodeCommand dnCmd = + cluster.getNameNodeRpc().blockReport(dnR, poolId, new BlockListAsLongs(blocks, null).getBlockListAsLongs()); if(LOG.isDebugEnabled()) { LOG.debug("Got the command: " + dnCmd); @@ -310,7 +311,7 @@ public void blockReport_06() throws IOException { DataNode dn = cluster.getDataNodes().get(DN_N1); String poolId = cluster.getNamesystem().getBlockPoolId(); DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId); - cluster.getNameNode().blockReport(dnR, poolId, + cluster.getNameNodeRpc().blockReport(dnR, poolId, new BlockListAsLongs(blocks, null).getBlockListAsLongs()); printStats(); assertEquals("Wrong number of PendingReplication Blocks", @@ -359,7 +360,7 @@ public void blockReport_07() throws IOException { DataNode dn = cluster.getDataNodes().get(DN_N1); String poolId = cluster.getNamesystem().getBlockPoolId(); DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId); - cluster.getNameNode().blockReport(dnR, poolId, + cluster.getNameNodeRpc().blockReport(dnR, poolId, new BlockListAsLongs(blocks, null).getBlockListAsLongs()); printStats(); assertEquals("Wrong number of Corrupted blocks", @@ -381,7 +382,7 @@ public void blockReport_07() throws IOException { LOG.debug("Done corrupting length of " + corruptedBlock.getBlockName()); } - cluster.getNameNode().blockReport(dnR, poolId, + cluster.getNameNodeRpc().blockReport(dnR, poolId, new BlockListAsLongs(blocks, null).getBlockListAsLongs()); printStats(); @@ -431,7 +432,7 @@ public void blockReport_08() throws IOException { DataNode dn = cluster.getDataNodes().get(DN_N1); String poolId = cluster.getNamesystem().getBlockPoolId(); DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId); - cluster.getNameNode().blockReport(dnR, poolId, + cluster.getNameNodeRpc().blockReport(dnR, poolId, new BlockListAsLongs(blocks, null).getBlockListAsLongs()); printStats(); assertEquals("Wrong number of PendingReplication blocks", @@ -477,7 +478,7 @@ public void blockReport_09() throws IOException { DataNode dn = cluster.getDataNodes().get(DN_N1); String poolId = cluster.getNamesystem().getBlockPoolId(); DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId); - cluster.getNameNode().blockReport(dnR, poolId, + cluster.getNameNodeRpc().blockReport(dnR, poolId, new BlockListAsLongs(blocks, null).getBlockListAsLongs()); printStats(); assertEquals("Wrong number of PendingReplication blocks", @@ -526,12 +527,12 @@ private void waitForTempReplica(Block bl, int DN_N1) throws IOException { tooLongWait); } - HdfsConstants.ReplicaState state = r.getState(); + HdfsServerConstants.ReplicaState state = r.getState(); if(LOG.isDebugEnabled()) { LOG.debug("Replica state before the loop " + state.getValue()); } start = System.currentTimeMillis(); - while (state != HdfsConstants.ReplicaState.TEMPORARY) { + while (state != HdfsServerConstants.ReplicaState.TEMPORARY) { waitTil(5); state = r.getState(); 
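[Editorial sketch, not part of the patch.] The pattern recurring through these test hunks is that protocol calls such as blockReport() and getBlockLocations() are no longer invoked on the NameNode object itself but on the NamenodeProtocols RPC interface obtained from the mini cluster. A minimal, hedged sketch of that access pattern, assuming MiniDFSCluster.getNameNodeRpc() behaves as these hunks use it; the class and method names below (RpcAccessSketch, locate) are illustrative only:

    import java.io.IOException;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
    import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

    class RpcAccessSketch {
      // was: cluster.getNameNode().getBlockLocations(path, 0, size)
      static LocatedBlocks locate(MiniDFSCluster cluster, String path, long size)
          throws IOException {
        NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
        return nnRpc.getBlockLocations(path, 0, size);
      }
    }
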
if(LOG.isDebugEnabled()) { @@ -590,7 +591,7 @@ private ArrayList prepareForRide(final Path filePath, DFSTestUtil.createFile(fs, filePath, fileSize, REPL_FACTOR, rand.nextLong()); - return locatedToBlocks(cluster.getNameNode() + return locatedToBlocks(cluster.getNameNodeRpc() .getBlockLocations(filePath.toString(), FILE_START, fileSize).getLocatedBlocks(), null); } @@ -707,7 +708,8 @@ private void corruptBlockGS(final Block block) private Block findBlock(Path path, long size) throws IOException { Block ret; List lbs = - cluster.getNameNode().getBlockLocations(path.toString(), + cluster.getNameNodeRpc() + .getBlockLocations(path.toString(), FILE_START, size).getLocatedBlocks(); LocatedBlock lb = lbs.get(lbs.size() - 1); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java index a98b0afd6e..04fa0ac87f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java @@ -30,9 +30,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.datanode.DataNode.BPOfferService; import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo; +import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.junit.Assert; import org.junit.Before; @@ -65,14 +66,14 @@ public void test2NNRegistration() throws IOException { assertNotNull("cannot create nn1", nn1); assertNotNull("cannot create nn2", nn2); - String bpid1 = nn1.getFSImage().getBlockPoolID(); - String bpid2 = nn2.getFSImage().getBlockPoolID(); - String cid1 = nn1.getFSImage().getClusterID(); - String cid2 = nn2.getFSImage().getClusterID(); - int lv1 = nn1.getFSImage().getLayoutVersion(); - int lv2 = nn2.getFSImage().getLayoutVersion(); - int ns1 = nn1.getFSImage().getNamespaceID(); - int ns2 = nn2.getFSImage().getNamespaceID(); + String bpid1 = FSImageTestUtil.getFSImage(nn1).getBlockPoolID(); + String bpid2 = FSImageTestUtil.getFSImage(nn2).getBlockPoolID(); + String cid1 = FSImageTestUtil.getFSImage(nn1).getClusterID(); + String cid2 = FSImageTestUtil.getFSImage(nn2).getClusterID(); + int lv1 =FSImageTestUtil.getFSImage(nn1).getLayoutVersion(); + int lv2 = FSImageTestUtil.getFSImage(nn2).getLayoutVersion(); + int ns1 = FSImageTestUtil.getFSImage(nn1).getNamespaceID(); + int ns2 = FSImageTestUtil.getFSImage(nn2).getNamespaceID(); assertNotSame("namespace ids should be different", ns1, ns2); LOG.info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1 + ";uri=" + nn1.getNameNodeAddress()); @@ -135,9 +136,9 @@ public void testFedSingleNN() throws IOException { NameNode nn1 = cluster.getNameNode(); assertNotNull("cannot create nn1", nn1); - String bpid1 = nn1.getFSImage().getBlockPoolID(); - String cid1 = nn1.getFSImage().getClusterID(); - int lv1 = nn1.getFSImage().getLayoutVersion(); + String bpid1 = FSImageTestUtil.getFSImage(nn1).getBlockPoolID(); + String cid1 = 
FSImageTestUtil.getFSImage(nn1).getClusterID(); + int lv1 = FSImageTestUtil.getFSImage(nn1).getLayoutVersion(); LOG.info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1 + ";uri=" + nn1.getNameNodeAddress()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java index 208df40795..a541bcb5d2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java @@ -38,10 +38,10 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; -import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.net.NetUtils; @@ -144,7 +144,7 @@ public void testVolumeFailure() throws IOException { String bpid = cluster.getNamesystem().getBlockPoolId(); DatanodeRegistration dnR = dn.getDNRegistrationForBP(bpid); long[] bReport = dn.getFSDataset().getBlockReport(bpid).getBlockListAsLongs(); - cluster.getNameNode().blockReport(dnR, bpid, bReport); + cluster.getNameNodeRpc().blockReport(dnR, bpid, bReport); // verify number of blocks and files... 
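[Editorial sketch, not part of the patch.] Two renames run through all of these test files: the client-visible constants class org.apache.hadoop.hdfs.protocol.FSConstants becomes HdfsConstants (keeping members such as QUOTA_DONT_SET, SafeModeAction, and DatanodeReportType), while the server-side org.apache.hadoop.hdfs.server.common.HdfsConstants becomes HdfsServerConstants (keeping ReplicaState, StartupOption, and READ_TIMEOUT). A small sketch of where typical members land after the split; the class name ConstantsSketch and its field names are illustrative only:

    import org.apache.hadoop.hdfs.protocol.HdfsConstants;
    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

    class ConstantsSketch {
      // client-facing: quota sentinel passed to DistributedFileSystem.setQuota()
      static final long NO_QUOTA = HdfsConstants.QUOTA_DONT_SET;
      // server-side: datanode socket timeout and replica states
      static final int DN_READ_TIMEOUT = HdfsServerConstants.READ_TIMEOUT;
      static final HdfsServerConstants.ReplicaState RBW_STATE =
          HdfsServerConstants.ReplicaState.RBW;
    }
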
verify(filename, filesize); @@ -216,7 +216,7 @@ private void verify(String fn, int fs) throws IOException{ * @throws IOException */ private void triggerFailure(String path, long size) throws IOException { - NameNode nn = cluster.getNameNode(); + NamenodeProtocols nn = cluster.getNameNodeRpc(); List locatedBlocks = nn.getBlockLocations(path, 0, size).getLocatedBlocks(); @@ -265,8 +265,8 @@ private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock) targetAddr = NetUtils.createSocketAddr(datanode.getName()); s = new Socket(); - s.connect(targetAddr, HdfsConstants.READ_TIMEOUT); - s.setSoTimeout(HdfsConstants.READ_TIMEOUT); + s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT); + s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT); String file = BlockReaderFactory.getFileName(targetAddr, "test-blockpoolid", @@ -291,7 +291,7 @@ private int countNNBlocks(Map map, String path, long size) throws IOException { int total = 0; - NameNode nn = cluster.getNameNode(); + NamenodeProtocols nn = cluster.getNameNodeRpc(); List locatedBlocks = nn.getBlockLocations(path, 0, size).getLocatedBlocks(); //System.out.println("Number of blocks: " + locatedBlocks.size()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java index 464af27d66..d0beaa2698 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java @@ -33,7 +33,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.hdfs.HdfsConfiguration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java index eb58f7f195..90869a2637 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java @@ -22,6 +22,20 @@ import java.io.IOException; import java.util.List; +import java.net.InetSocketAddress; + +import java.net.SocketTimeoutException; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.StringUtils; + +import org.apache.hadoop.ipc.Client; +import org.apache.hadoop.ipc.ProtocolSignature; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; @@ -37,7 +51,8 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; -import 
org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; +import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; @@ -48,6 +63,50 @@ * This tests InterDataNodeProtocol for block handling. */ public class TestInterDatanodeProtocol { + private static final String ADDRESS = "0.0.0.0"; + final static private int PING_INTERVAL = 1000; + final static private int MIN_SLEEP_TIME = 1000; + private static Configuration conf = new HdfsConfiguration(); + + + private static class TestServer extends Server { + private boolean sleep; + private Class responseClass; + + public TestServer(int handlerCount, boolean sleep) throws IOException { + this(handlerCount, sleep, LongWritable.class, null); + } + + public TestServer(int handlerCount, boolean sleep, + Class paramClass, + Class responseClass) + throws IOException { + super(ADDRESS, 0, paramClass, handlerCount, conf); + this.sleep = sleep; + this.responseClass = responseClass; + } + + @Override + public Writable call(String protocol, Writable param, long receiveTime) + throws IOException { + if (sleep) { + // sleep a bit + try { + Thread.sleep(PING_INTERVAL + MIN_SLEEP_TIME); + } catch (InterruptedException e) {} + } + if (responseClass != null) { + try { + return responseClass.newInstance(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } else { + return param; // echo param as result + } + } + } + public static void checkMetaInfo(ExtendedBlock b, DataNode dn) throws IOException { Block metainfo = dn.data.getStoredBlock(b.getBlockPoolId(), b.getBlockId()); Assert.assertEquals(b.getBlockId(), metainfo.getBlockId()); @@ -73,7 +132,6 @@ public static LocatedBlock getLastLocatedBlock( */ @Test public void testBlockMetaDataInfo() throws Exception { - Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; try { @@ -222,7 +280,6 @@ public void testInitReplicaRecovery() throws IOException { * */ @Test public void testUpdateReplicaUnderRecovery() throws IOException { - final Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; try { @@ -291,4 +348,33 @@ public void testUpdateReplicaUnderRecovery() throws IOException { if (cluster != null) cluster.shutdown(); } } + + /** Test to verify that InterDatanode RPC timesout as expected when + * the server DN does not respond. 
+ */ + @Test + public void testInterDNProtocolTimeout() throws Exception { + final Server server = new TestServer(1, true); + server.start(); + + final InetSocketAddress addr = NetUtils.getConnectAddress(server); + DatanodeID fakeDnId = new DatanodeID( + "localhost:" + addr.getPort(), "fake-storage", 0, addr.getPort()); + DatanodeInfo dInfo = new DatanodeInfo(fakeDnId); + InterDatanodeProtocol proxy = null; + + try { + proxy = DataNode.createInterDataNodeProtocolProxy( + dInfo, conf, 500); + proxy.initReplicaRecovery(null); + fail ("Expected SocketTimeoutException exception, but did not get."); + } catch (SocketTimeoutException e) { + DataNode.LOG.info("Got expected Exception: SocketTimeoutException" + e); + } finally { + if (proxy != null) { + RPC.stopProxy(proxy); + } + server.stop(); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java index fba411ab88..7237f2a93e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java @@ -32,10 +32,10 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.log4j.Level; import org.junit.Assert; @@ -109,7 +109,7 @@ public void testTransferRbw() throws Exception { final DatanodeInfo oldnodeinfo; { - final DatanodeInfo[] datatnodeinfos = cluster.getNameNode( + final DatanodeInfo[] datatnodeinfos = cluster.getNameNodeRpc( ).getDatanodeReport(DatanodeReportType.LIVE); Assert.assertEquals(2, datatnodeinfos.length); int i = 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java index 5deccd5c22..c90b2900db 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java @@ -27,7 +27,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Properties; @@ -354,12 +353,9 @@ static List getNameNodeCurrentDirs(MiniDFSCluster cluster) { */ public static EditLogFile findLatestEditsLog(StorageDirectory sd) throws IOException { - FSImageTransactionalStorageInspector inspector = - new FSImageTransactionalStorageInspector(); - inspector.inspectDirectory(sd); - - List foundEditLogs = Lists.newArrayList( - inspector.getEditLogFiles()); + File currentDir = sd.getCurrentDir(); + List foundEditLogs + = 
Lists.newArrayList(FileJournalManager.matchEditLogs(currentDir.listFiles())); return Collections.max(foundEditLogs, EditLogFile.COMPARE_BY_START_TXID); } @@ -411,4 +407,9 @@ public static void logStorageContents(Log LOG, NNStorage storage) { } } } + + /** get the fsImage*/ + public static FSImage getFSImage(NameNode node) { + return node.getFSImage(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java index f4cedf9048..afc003f938 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.datanode.DataNode; @@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -101,6 +102,7 @@ public class NNThroughputBenchmark { static Configuration config; static NameNode nameNode; + static NamenodeProtocols nameNodeProto; NNThroughputBenchmark(Configuration conf) throws IOException, LoginException { config = conf; @@ -120,6 +122,7 @@ public class NNThroughputBenchmark { // Start the NameNode String[] argv = new String[] {}; nameNode = NameNode.createNameNode(argv, config); + nameNodeProto = nameNode.getRpcServer(); } void close() throws IOException { @@ -265,9 +268,9 @@ private boolean isInPorgress() { } void cleanUp() throws IOException { - nameNode.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE); + nameNodeProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE); if(!keepResults) - nameNode.delete(getBaseDir(), true); + nameNodeProto.delete(getBaseDir(), true); } int getNumOpsExecuted() { @@ -398,7 +401,7 @@ public String toString() { void benchmarkOne() throws IOException { for(int idx = 0; idx < opsPerThread; idx++) { if((localNumOpsExecuted+1) % statsOp.ugcRefreshCount == 0) - nameNode.refreshUserToGroupsMappings(); + nameNodeProto.refreshUserToGroupsMappings(); long stat = statsOp.executeOp(daemonId, idx, arg1); localNumOpsExecuted++; localCumulativeTime += stat; @@ -459,9 +462,9 @@ String getExecutionArgument(int daemonId) { */ long executeOp(int daemonId, int inputIdx, String ignore) throws IOException { - nameNode.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE); + nameNodeProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE); long start = System.currentTimeMillis(); - nameNode.delete(BASE_DIR_NAME, true); + nameNodeProto.delete(BASE_DIR_NAME, true); long end = System.currentTimeMillis(); return end-start; } @@ -523,7 +526,7 @@ void 
parseArguments(List args) { void generateInputs(int[] opsPerThread) throws IOException { assert opsPerThread.length == numThreads : "Error opsPerThread.length"; - nameNode.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE); + nameNodeProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE); // int generatedFileIdx = 0; LOG.info("Generate " + numOpsRequired + " intputs for " + getOpName()); fileNames = new String[numThreads][]; @@ -555,12 +558,12 @@ long executeOp(int daemonId, int inputIdx, String clientName) throws IOException { long start = System.currentTimeMillis(); // dummyActionNoSynch(fileIdx); - nameNode.create(fileNames[daemonId][inputIdx], FsPermission.getDefault(), + nameNodeProto.create(fileNames[daemonId][inputIdx], FsPermission.getDefault(), clientName, new EnumSetWritable(EnumSet .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, replication, BLOCK_SIZE); long end = System.currentTimeMillis(); for(boolean written = !closeUponCreate; !written; - written = nameNode.complete(fileNames[daemonId][inputIdx], + written = nameNodeProto.complete(fileNames[daemonId][inputIdx], clientName, null)); return end-start; } @@ -628,11 +631,11 @@ void generateInputs(int[] opsPerThread) throws IOException { } // use the same files for open super.generateInputs(opsPerThread); - if(nameNode.getFileInfo(opCreate.getBaseDir()) != null - && nameNode.getFileInfo(getBaseDir()) == null) { - nameNode.rename(opCreate.getBaseDir(), getBaseDir()); + if(nameNodeProto.getFileInfo(opCreate.getBaseDir()) != null + && nameNodeProto.getFileInfo(getBaseDir()) == null) { + nameNodeProto.rename(opCreate.getBaseDir(), getBaseDir()); } - if(nameNode.getFileInfo(getBaseDir()) == null) { + if(nameNodeProto.getFileInfo(getBaseDir()) == null) { throw new IOException(getBaseDir() + " does not exist."); } } @@ -643,7 +646,7 @@ void generateInputs(int[] opsPerThread) throws IOException { long executeOp(int daemonId, int inputIdx, String ignore) throws IOException { long start = System.currentTimeMillis(); - nameNode.getBlockLocations(fileNames[daemonId][inputIdx], 0L, BLOCK_SIZE); + nameNodeProto.getBlockLocations(fileNames[daemonId][inputIdx], 0L, BLOCK_SIZE); long end = System.currentTimeMillis(); return end-start; } @@ -671,7 +674,7 @@ String getOpName() { long executeOp(int daemonId, int inputIdx, String ignore) throws IOException { long start = System.currentTimeMillis(); - nameNode.delete(fileNames[daemonId][inputIdx], false); + nameNodeProto.delete(fileNames[daemonId][inputIdx], false); long end = System.currentTimeMillis(); return end-start; } @@ -699,7 +702,7 @@ String getOpName() { long executeOp(int daemonId, int inputIdx, String ignore) throws IOException { long start = System.currentTimeMillis(); - nameNode.getFileInfo(fileNames[daemonId][inputIdx]); + nameNodeProto.getFileInfo(fileNames[daemonId][inputIdx]); long end = System.currentTimeMillis(); return end-start; } @@ -741,7 +744,7 @@ void generateInputs(int[] opsPerThread) throws IOException { long executeOp(int daemonId, int inputIdx, String ignore) throws IOException { long start = System.currentTimeMillis(); - nameNode.rename(fileNames[daemonId][inputIdx], + nameNodeProto.rename(fileNames[daemonId][inputIdx], destNames[daemonId][inputIdx]); long end = System.currentTimeMillis(); return end-start; @@ -788,11 +791,11 @@ String getName() { void register() throws IOException { // get versions from the namenode - nsInfo = nameNode.versionRequest(); + nsInfo = nameNodeProto.versionRequest(); dnRegistration.setStorageInfo(new DataStorage(nsInfo, 
"")); DataNode.setNewStorageID(dnRegistration); // register datanode - dnRegistration = nameNode.registerDatanode(dnRegistration); + dnRegistration = nameNodeProto.registerDatanode(dnRegistration); } /** @@ -802,7 +805,7 @@ void register() throws IOException { void sendHeartbeat() throws IOException { // register datanode // TODO:FEDERATION currently a single block pool is supported - DatanodeCommand[] cmds = nameNode.sendHeartbeat(dnRegistration, + DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0, 0); if(cmds != null) { for (DatanodeCommand cmd : cmds ) { @@ -847,7 +850,7 @@ public int compareTo(String name) { int replicateBlocks() throws IOException { // register datanode // TODO:FEDERATION currently a single block pool is supported - DatanodeCommand[] cmds = nameNode.sendHeartbeat(dnRegistration, + DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0, 0); if (cmds != null) { for (DatanodeCommand cmd : cmds) { @@ -878,7 +881,7 @@ private int transferBlocks( Block blocks[], receivedDNReg.setStorageInfo( new DataStorage(nsInfo, dnInfo.getStorageID())); receivedDNReg.setInfoPort(dnInfo.getInfoPort()); - nameNode.blockReceivedAndDeleted(receivedDNReg, nameNode + nameNodeProto.blockReceivedAndDeleted(receivedDNReg, nameNode .getNamesystem().getBlockPoolId(), new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo( blocks[i], DataNode.EMPTY_DEL_HINT) }); @@ -969,14 +972,14 @@ void generateInputs(int[] ignore) throws IOException { FileNameGenerator nameGenerator; nameGenerator = new FileNameGenerator(getBaseDir(), 100); String clientName = getClientName(007); - nameNode.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE); + nameNodeProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE); for(int idx=0; idx < nrFiles; idx++) { String fileName = nameGenerator.getNextFileName("ThroughputBench"); - nameNode.create(fileName, FsPermission.getDefault(), clientName, + nameNodeProto.create(fileName, FsPermission.getDefault(), clientName, new EnumSetWritable(EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, replication, BLOCK_SIZE); ExtendedBlock lastBlock = addBlocks(fileName, clientName); - nameNode.complete(fileName, clientName, lastBlock); + nameNodeProto.complete(fileName, clientName, lastBlock); } // prepare block reports for(int idx=0; idx < nrDatanodes; idx++) { @@ -988,12 +991,12 @@ private ExtendedBlock addBlocks(String fileName, String clientName) throws IOException { ExtendedBlock prevBlock = null; for(int jdx = 0; jdx < blocksPerFile; jdx++) { - LocatedBlock loc = nameNode.addBlock(fileName, clientName, prevBlock, null); + LocatedBlock loc = nameNodeProto.addBlock(fileName, clientName, prevBlock, null); prevBlock = loc.getBlock(); for(DatanodeInfo dnInfo : loc.getLocations()) { int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getName()); datanodes[dnIdx].addBlock(loc.getBlock().getLocalBlock()); - nameNode.blockReceivedAndDeleted(datanodes[dnIdx].dnRegistration, loc + nameNodeProto.blockReceivedAndDeleted(datanodes[dnIdx].dnRegistration, loc .getBlock().getBlockPoolId(), new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(loc .getBlock().getLocalBlock(), "") }); @@ -1013,7 +1016,7 @@ long executeOp(int daemonId, int inputIdx, String ignore) throws IOException { assert daemonId < numThreads : "Wrong daemonId."; TinyDatanode dn = datanodes[daemonId]; long start = System.currentTimeMillis(); - 
nameNode.blockReport(dn.dnRegistration, nameNode.getNamesystem() + nameNodeProto.blockReport(dn.dnRegistration, nameNode.getNamesystem() .getBlockPoolId(), dn.getBlockReportList()); long end = System.currentTimeMillis(); return end-start; @@ -1146,7 +1149,7 @@ private void decommissionNodes() throws IOException { LOG.info("Datanode " + dn.getName() + " is decommissioned."); } excludeFile.close(); - nameNode.refreshNodes(); + nameNodeProto.refreshNodes(); } /** @@ -1160,8 +1163,8 @@ long executeOp(int daemonId, int inputIdx, String ignore) throws IOException { assert daemonId < numThreads : "Wrong daemonId."; long start = System.currentTimeMillis(); // compute data-node work - int work = BlockManagerTestUtil.getComputedDatanodeWork(nameNode - .getNamesystem().getBlockManager()); + int work = BlockManagerTestUtil.getComputedDatanodeWork( + nameNode.getNamesystem().getBlockManager()); long end = System.currentTimeMillis(); numPendingBlocks += work; if(work == 0) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java index afa39dfcf2..76f0b9408a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java @@ -46,13 +46,13 @@ public static LocatedBlocks getBlockLocations(NameNode namenode, return namenode.getNamesystem().getBlockLocations( src, offset, length, false, true); } - + /** * Get the internal RPC server instance. * @return rpc server */ public static Server getRpcServer(NameNode namenode) { - return namenode.server; + return ((NameNodeRpcServer)namenode.getRpcServer()).server; } public static DelegationTokenSecretManager getDtSecretManager( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java index 433670ba4c..3fca8a3808 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; @@ -172,7 +172,7 @@ private CheckpointSignature runOperations() throws IOException { long atime = mtime; dfs.setTimes(pathFileCreate, mtime, atime); // OP_SET_QUOTA 14 - dfs.setQuota(pathDirectoryMkdir, 1000L, FSConstants.QUOTA_DONT_SET); + dfs.setQuota(pathDirectoryMkdir, 1000L, HdfsConstants.QUOTA_DONT_SET); // OP_RENAME 15 fc.rename(pathFileCreate, pathFileMoved, Rename.NONE); // OP_CONCAT_DELETE 16 @@ -239,10 +239,10 @@ public Object run() throws IOException { LOG.info("Innocuous exception", e); } locatedBlocks = DFSClientAdapter.callGetBlockLocations( - cluster.getNameNode(), filePath, 0L, bytes.length); + 
cluster.getNameNodeRpc(), filePath, 0L, bytes.length); } while (locatedBlocks.isUnderConstruction()); // Force a roll so we get an OP_END_LOG_SEGMENT txn - return cluster.getNameNode().rollEditLog(); + return cluster.getNameNodeRpc().rollEditLog(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java index 72e3bf7230..2a27c37fc9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java @@ -31,9 +31,10 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.test.GenericTestUtils; import org.apache.log4j.Level; @@ -128,12 +129,13 @@ public void testBackupNodeTailsEdits() throws Exception { fileSys = cluster.getFileSystem(); backup = startBackupNode(conf, StartupOption.BACKUP, 1); - BackupImage bnImage = backup.getBNImage(); + BackupImage bnImage = (BackupImage) backup.getFSImage(); testBNInSync(cluster, backup, 1); // Force a roll -- BN should roll with NN. NameNode nn = cluster.getNameNode(); - nn.rollEditLog(); + NamenodeProtocols nnRpc = nn.getRpcServer(); + nnRpc.rollEditLog(); assertEquals(bnImage.getEditLog().getCurSegmentTxId(), nn.getFSImage().getEditLog().getCurSegmentTxId()); @@ -207,7 +209,9 @@ public Boolean get() { LOG.info("Checking for " + src + " on BN"); try { boolean hasFile = backup.getNamesystem().getFileInfo(src, false) != null; - boolean txnIdMatch = backup.getTransactionID() == nn.getTransactionID(); + boolean txnIdMatch = + backup.getRpcServer().getTransactionID() == + nn.getRpcServer().getTransactionID(); return hasFile && txnIdMatch; } catch (Exception e) { throw new RuntimeException(e); @@ -264,7 +268,7 @@ void testCheckpoint(StartupOption op) throws Exception { // // Take a checkpoint // - long txid = cluster.getNameNode().getTransactionID(); + long txid = cluster.getNameNodeRpc().getTransactionID(); backup = startBackupNode(conf, op, 1); waitCheckpointDone(cluster, backup, txid); } catch(IOException e) { @@ -300,18 +304,18 @@ void testCheckpoint(StartupOption op) throws Exception { // Take a checkpoint // backup = startBackupNode(conf, op, 1); - long txid = cluster.getNameNode().getTransactionID(); + long txid = cluster.getNameNodeRpc().getTransactionID(); waitCheckpointDone(cluster, backup, txid); for (int i = 0; i < 10; i++) { fileSys.mkdirs(new Path("file_" + i)); } - txid = cluster.getNameNode().getTransactionID(); + txid = cluster.getNameNodeRpc().getTransactionID(); backup.doCheckpoint(); waitCheckpointDone(cluster, backup, txid); - txid = cluster.getNameNode().getTransactionID(); + txid = cluster.getNameNodeRpc().getTransactionID(); backup.doCheckpoint(); waitCheckpointDone(cluster, backup, txid); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java index beb70bd9b1..66e60b0271 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java @@ -37,7 +37,8 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -152,7 +153,7 @@ public void testBlockCreation() throws IOException { */ @Test public void testGetBlockLocations() throws IOException { - final NameNode namenode = cluster.getNameNode(); + final NamenodeProtocols namenode = cluster.getNameNodeRpc(); final Path p = new Path(BASE_DIR, "file2.dat"); final String src = p.toString(); final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java index 4f698c08d3..fbbcfc72f8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile; @@ -84,8 +84,10 @@ public void testSaveNamespace() throws IOException { for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) { EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd); assertTrue(log.isInProgress()); + log.validateLog(); + long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1; assertEquals("In-progress log " + log + " should have 5 transactions", - 5, log.validateLog().numTransactions); + 5, numTransactions);; } // Saving image in safe mode should succeed @@ -99,8 +101,10 @@ public void testSaveNamespace() throws IOException { for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) { EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd); assertTrue(log.isInProgress()); + log.validateLog(); + long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1; assertEquals("In-progress log " + log + " should only have START txn", - 1, log.validateLog().numTransactions); + 1, numTransactions); } // restart cluster diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java index 96d46783fe..f88343e0a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java @@ -45,13 +45,14 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.CheckpointStorage; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.hdfs.tools.DFSAdmin; @@ -982,11 +983,12 @@ public void testCheckpointSignature() throws IOException { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes) .format(true).build(); NameNode nn = cluster.getNameNode(); + NamenodeProtocols nnRpc = nn.getRpcServer(); SecondaryNameNode secondary = startSecondaryNameNode(conf); // prepare checkpoint image secondary.doCheckpoint(); - CheckpointSignature sig = nn.rollEditLog(); + CheckpointSignature sig = nnRpc.rollEditLog(); // manipulate the CheckpointSignature fields sig.setBlockpoolID("somerandomebpid"); sig.clusterID = "somerandomcid"; @@ -1073,8 +1075,10 @@ public void testMultipleSecondaryNamenodes() throws IOException { .nameNodePort(9928).build(); Configuration snConf1 = new HdfsConfiguration(cluster.getConfiguration(0)); Configuration snConf2 = new HdfsConfiguration(cluster.getConfiguration(1)); - InetSocketAddress nn1RpcAddress = cluster.getNameNode(0).rpcAddress; - InetSocketAddress nn2RpcAddress = cluster.getNameNode(1).rpcAddress; + InetSocketAddress nn1RpcAddress = + cluster.getNameNode(0).getNameNodeAddress(); + InetSocketAddress nn2RpcAddress = + cluster.getNameNode(1).getNameNodeAddress(); String nn1 = nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort(); String nn2 = nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort(); @@ -1212,7 +1216,7 @@ public void testMultipleSecondaryNNsAgainstSameNN() throws Exception { CheckpointStorage spyImage1 = spyOnSecondaryImage(secondary1); DelayAnswer delayer = new DelayAnswer(LOG); Mockito.doAnswer(delayer).when(spyImage1) - .saveFSImageInAllDirs(Mockito.anyLong()); + .saveFSImageInAllDirs(Mockito.any(), Mockito.anyLong()); // Set up a thread to do a checkpoint from the first 2NN DoCheckpointThread checkpointThread = new DoCheckpointThread(secondary1); @@ -1444,9 +1448,9 @@ public void testNamespaceVerifiedOnFileTransfer() throws IOException { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) .format(true).build(); - NameNode nn = cluster.getNameNode(); - String fsName = 
NameNode.getHostPortString(nn.getHttpAddress()); - + NamenodeProtocols nn = cluster.getNameNodeRpc(); + String fsName = NameNode.getHostPortString( + cluster.getNameNode().getHttpAddress()); // Make a finalized log on the server side. nn.rollEditLog(); @@ -1515,8 +1519,8 @@ public void testCheckpointWithFailedStorageDir() throws Exception { // Now primary NN experiences failure of a volume -- fake by // setting its current dir to a-x permissions - NameNode nn = cluster.getNameNode(); - NNStorage storage = nn.getFSImage().getStorage(); + NamenodeProtocols nn = cluster.getNameNodeRpc(); + NNStorage storage = cluster.getNameNode().getFSImage().getStorage(); StorageDirectory sd0 = storage.getStorageDir(0); StorageDirectory sd1 = storage.getStorageDir(1); @@ -1590,8 +1594,8 @@ public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception { // Now primary NN experiences failure of its only name dir -- fake by // setting its current dir to a-x permissions - NameNode nn = cluster.getNameNode(); - NNStorage storage = nn.getFSImage().getStorage(); + NamenodeProtocols nn = cluster.getNameNodeRpc(); + NNStorage storage = cluster.getNameNode().getFSImage().getStorage(); StorageDirectory sd0 = storage.getStorageDir(0); assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType()); currentDir = sd0.getCurrentDir(); @@ -1704,7 +1708,7 @@ public void testSecondaryHasVeryOutOfDateImage() throws IOException { secondary.doCheckpoint(); // Now primary NN saves namespace 3 times - NameNode nn = cluster.getNameNode(); + NamenodeProtocols nn = cluster.getNameNodeRpc(); nn.setSafeMode(SafeModeAction.SAFEMODE_ENTER); for (int i = 0; i < 3; i++) { nn.saveNamespace(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java index 94b733cefe..220bfd6a39 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java @@ -34,7 +34,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.junit.After; @@ -50,7 +50,7 @@ private String getClusterId(Configuration config) throws IOException { // see if cluster id not empty. 
Collection dirsToFormat = FSNamesystem.getNamespaceDirs(config); Collection editsToFormat = new ArrayList(0); - FSImage fsImage = new FSImage(config, null, dirsToFormat, editsToFormat); + FSImage fsImage = new FSImage(config, dirsToFormat, editsToFormat); Iterator sdit = fsImage.getStorage().dirIterator(NNStorage.NameNodeDirType.IMAGE); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java index c6bd56955a..bc33f175bf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java @@ -103,7 +103,7 @@ public void testDeadDatanode() throws Exception { dn.shutdown(); waitForDatanodeState(reg.getStorageID(), false, 20000); - DatanodeProtocol dnp = cluster.getNameNode(); + DatanodeProtocol dnp = cluster.getNameNodeRpc(); ReceivedDeletedBlockInfo[] blocks = { new ReceivedDeletedBlockInfo( new Block(0), "") }; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java index be78c0df3a..0d2ea934e2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; import org.junit.AfterClass; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java index 9e55946349..123810c9dc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.*; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -80,7 +81,7 @@ public class TestEditLog extends TestCase { static final int NUM_TRANSACTIONS = 100; static final int NUM_THREADS = 100; - private static final File TEST_DIR = new File( + static final File TEST_DIR = new File( System.getProperty("test.build.data","build/test/data")); /** An edits log with 3 edits from 0.20 - the result of @@ -627,13 +628,23 @@ private void testCrashRecovery(int numTransactions) throws Exception { } public void testCrashRecoveryEmptyLogOneDir() throws Exception { - doTestCrashRecoveryEmptyLog(false); + doTestCrashRecoveryEmptyLog(false, true); } public void 
testCrashRecoveryEmptyLogBothDirs() throws Exception { - doTestCrashRecoveryEmptyLog(true); + doTestCrashRecoveryEmptyLog(true, true); + } + + public void testCrashRecoveryEmptyLogOneDirNoUpdateSeenTxId() + throws Exception { + doTestCrashRecoveryEmptyLog(false, false); } + public void testCrashRecoveryEmptyLogBothDirsNoUpdateSeenTxId() + throws Exception { + doTestCrashRecoveryEmptyLog(true, false); + } + /** * Test that the NN handles the corruption properly * after it crashes just after creating an edit log @@ -646,8 +657,14 @@ public void testCrashRecoveryEmptyLogBothDirs() throws Exception { * will only be in one of the directories. In both cases, the * NN should fail to start up, because it's aware that txid 3 * was reached, but unable to find a non-corrupt log starting there. + * @param updateTransactionIdFile if true update the seen_txid file. + * If false, the it will not be updated. This will simulate a case + * where the NN crashed between creating the new segment and updating + * seen_txid. */ - private void doTestCrashRecoveryEmptyLog(boolean inBothDirs) throws Exception { + private void doTestCrashRecoveryEmptyLog(boolean inBothDirs, + boolean updateTransactionIdFile) + throws Exception { // start a cluster Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; @@ -665,6 +682,14 @@ private void doTestCrashRecoveryEmptyLog(boolean inBothDirs) throws Exception { // Make a truncated edits_3_inprogress File log = new File(currentDir, NNStorage.getInProgressEditsFileName(3)); + NNStorage storage = new NNStorage(conf, + Collections.emptyList(), + Lists.newArrayList(uri)); + if (updateTransactionIdFile) { + storage.writeTransactionIdFileToStorage(3); + } + storage.close(); + new EditLogFileOutputStream(log, 1024).create(); if (!inBothDirs) { break; @@ -675,9 +700,9 @@ private void doTestCrashRecoveryEmptyLog(boolean inBothDirs) throws Exception { cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(NUM_DATA_NODES).format(false).build(); fail("Did not fail to start with all-corrupt logs"); - } catch (IllegalStateException ise) { + } catch (IOException ioe) { GenericTestUtils.assertExceptionContains( - "No non-corrupt logs for txid 3", ise); + "No non-corrupt logs for txid 3", ioe); } cluster.shutdown(); } @@ -702,7 +727,17 @@ public EditLogByteInputStream(byte[] data) throws IOException { reader = new FSEditLogOp.Reader(in, version); } + + @Override + public long getFirstTxId() throws IOException { + return HdfsConstants.INVALID_TXID; + } + @Override + public long getLastTxId() throws IOException { + return HdfsConstants.INVALID_TXID; + } + @Override public long length() throws IOException { return len; @@ -852,6 +887,168 @@ private NNStorage mockStorageWithEdits(String... editsDirSpecs) { Mockito.doReturn(sds).when(storage).dirIterable(NameNodeDirType.EDITS); return storage; } - - + + /** + * Specification for a failure during #setupEdits + */ + static class AbortSpec { + final int roll; + final int logindex; + + /** + * Construct the failure specification. + * @param roll number to fail after. e.g. 1 to fail after the first roll + * @param loginfo index of journal to fail. + */ + AbortSpec(int roll, int logindex) { + this.roll = roll; + this.logindex = logindex; + } + } + + final static int TXNS_PER_ROLL = 10; + final static int TXNS_PER_FAIL = 2; + + /** + * Set up directories for tests. + * + * Each rolled file is 10 txns long. + * A failed file is 2 txns long. 
+ * + * @param editUris directories to create edit logs in + * @param numrolls number of times to roll the edit log during setup + * @param abortAtRolls Specifications for when to fail, see AbortSpec + */ + public static NNStorage setupEdits(List editUris, int numrolls, + AbortSpec... abortAtRolls) + throws IOException { + List aborts = new ArrayList(Arrays.asList(abortAtRolls)); + NNStorage storage = new NNStorage(new Configuration(), + Collections.emptyList(), + editUris); + storage.format("test-cluster-id"); + FSEditLog editlog = new FSEditLog(storage); + // open the edit log and add two transactions + // logGenerationStamp is used, simply because it doesn't + // require complex arguments. + editlog.open(); + for (int i = 2; i < TXNS_PER_ROLL; i++) { + editlog.logGenerationStamp((long)0); + } + editlog.logSync(); + + // Go into edit log rolling loop. + // On each roll, the abortAtRolls abort specs are + // checked to see if an abort is required. If so the + // the specified journal is aborted. It will be brought + // back into rotation automatically by rollEditLog + for (int i = 0; i < numrolls; i++) { + editlog.rollEditLog(); + + editlog.logGenerationStamp((long)i); + editlog.logSync(); + + while (aborts.size() > 0 + && aborts.get(0).roll == (i+1)) { + AbortSpec spec = aborts.remove(0); + editlog.getJournals().get(spec.logindex).abort(); + } + + for (int j = 3; j < TXNS_PER_ROLL; j++) { + editlog.logGenerationStamp((long)i); + } + editlog.logSync(); + } + editlog.close(); + + FSImageTestUtil.logStorageContents(LOG, storage); + return storage; + } + + /** + * Test loading an editlog which has had both its storage fail + * on alternating rolls. Two edit log directories are created. + * The first on fails on odd rolls, the second on even. Test + * that we are able to load the entire editlog regardless. + */ + @Test + public void testAlternatingJournalFailure() throws IOException { + File f1 = new File(TEST_DIR + "/alternatingjournaltest0"); + File f2 = new File(TEST_DIR + "/alternatingjournaltest1"); + + List editUris = ImmutableList.of(f1.toURI(), f2.toURI()); + + NNStorage storage = setupEdits(editUris, 10, + new AbortSpec(1, 0), + new AbortSpec(2, 1), + new AbortSpec(3, 0), + new AbortSpec(4, 1), + new AbortSpec(5, 0), + new AbortSpec(6, 1), + new AbortSpec(7, 0), + new AbortSpec(8, 1), + new AbortSpec(9, 0), + new AbortSpec(10, 1)); + long totaltxnread = 0; + FSEditLog editlog = new FSEditLog(storage); + long startTxId = 1; + Iterable editStreams = editlog.selectInputStreams(startTxId, + TXNS_PER_ROLL*11); + + for (EditLogInputStream edits : editStreams) { + FSEditLogLoader.EditLogValidation val = FSEditLogLoader.validateEditLog(edits); + long read = val.getNumTransactions(); + LOG.info("Loading edits " + edits + " read " + read); + assertEquals(startTxId, val.getStartTxId()); + startTxId += read; + totaltxnread += read; + } + + editlog.close(); + storage.close(); + assertEquals(TXNS_PER_ROLL*11, totaltxnread); + } + + /** + * Test loading an editlog with gaps. A single editlog directory + * is set up. On of the edit log files is deleted. This should + * fail when selecting the input streams as it will not be able + * to select enough streams to load up to 4*TXNS_PER_ROLL. + * There should be 4*TXNS_PER_ROLL transactions as we rolled 3 + * times. 
+ */ + @Test + public void testLoadingWithGaps() throws IOException { + File f1 = new File(TEST_DIR + "/gaptest0"); + List editUris = ImmutableList.of(f1.toURI()); + + NNStorage storage = setupEdits(editUris, 3); + + final long startGapTxId = 1*TXNS_PER_ROLL + 1; + final long endGapTxId = 2*TXNS_PER_ROLL; + + File[] files = new File(f1, "current").listFiles(new FilenameFilter() { + public boolean accept(File dir, String name) { + if (name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId, + endGapTxId))) { + return true; + } + return false; + } + }); + assertEquals(1, files.length); + assertTrue(files[0].delete()); + + FSEditLog editlog = new FSEditLog(storage); + long startTxId = 1; + try { + Iterable editStreams + = editlog.selectInputStreams(startTxId, 4*TXNS_PER_ROLL); + + fail("Should have thrown exception"); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains( + "No non-corrupt logs for txid " + startGapTxId, ioe); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java index a673c5f3b3..1228bef604 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java @@ -63,8 +63,8 @@ public void testPreallocation() throws IOException { EditLogValidation validation = EditLogFileInputStream.validateEditLog(editLog); assertEquals("Edit log should contain a header as valid length", - HEADER_LEN, validation.validLength); - assertEquals(1, validation.numTransactions); + HEADER_LEN, validation.getValidLength()); + assertEquals(1, validation.getNumTransactions()); assertEquals("Edit log should have 1MB of bytes allocated", 1024*1024, editLog.length()); @@ -72,12 +72,12 @@ public void testPreallocation() throws IOException { cluster.getFileSystem().mkdirs(new Path("/tmp"), new FsPermission((short)777)); - long oldLength = validation.validLength; + long oldLength = validation.getValidLength(); validation = EditLogFileInputStream.validateEditLog(editLog); assertTrue("Edit log should have more valid data after writing a txn " + - "(was: " + oldLength + " now: " + validation.validLength + ")", - validation.validLength > oldLength); - assertEquals(2, validation.numTransactions); + "(was: " + oldLength + " now: " + validation.getValidLength() + ")", + validation.getValidLength() > oldLength); + assertEquals(2, validation.getNumTransactions()); assertEquals("Edit log should be 1MB long", 1024 * 1024, editLog.length()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java index 88d57da32a..bc5aa162fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java @@ -36,9 +36,9 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; 
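Note: the EditLogValidation result is now read through accessors rather than its former public fields (validLength, numTransactions). A small sketch of the validation idiom used by these tests, assuming same-package access just as the tests have (the class and method names below are illustrative):

package org.apache.hadoop.hdfs.server.namenode; // same package as the tests

import java.io.File;
import java.io.IOException;

class EditLogValidationSketch {
  static void printValidation(File editsFile) throws IOException {
    FSEditLogLoader.EditLogValidation val =
        EditLogFileInputStream.validateEditLog(editsFile);
    // Accessors replace the former public fields validLength / numTransactions.
    System.out.println("valid length: " + val.getValidLength());
    System.out.println("transactions: " + val.getNumTransactions());
  }
}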
import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.log4j.Level; @@ -350,7 +350,7 @@ public void testSaveImageWhileSyncInProgress() throws Exception { Configuration conf = getConf(); NameNode.initMetrics(conf, NamenodeRole.NAMENODE); DFSTestUtil.formatNameNode(conf); - final FSNamesystem namesystem = new FSNamesystem(conf); + final FSNamesystem namesystem = FSNamesystem.loadFromDisk(conf); try { FSImage fsimage = namesystem.getFSImage(); @@ -448,7 +448,7 @@ public void testSaveRightBeforeSync() throws Exception { Configuration conf = getConf(); NameNode.initMetrics(conf, NamenodeRole.NAMENODE); DFSTestUtil.formatNameNode(conf); - final FSNamesystem namesystem = new FSNamesystem(conf); + final FSNamesystem namesystem = FSNamesystem.loadFromDisk(conf); try { FSImage fsimage = namesystem.getFSImage(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java index a53a0bf26c..84692a4d82 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java @@ -187,8 +187,8 @@ public void testCountValidTransactions() throws IOException { // Make sure that uncorrupted log has the expected length and number // of transactions. 
EditLogValidation validation = EditLogFileInputStream.validateEditLog(logFile); - assertEquals(NUM_TXNS + 2, validation.numTransactions); - assertEquals(validLength, validation.validLength); + assertEquals(NUM_TXNS + 2, validation.getNumTransactions()); + assertEquals(validLength, validation.getValidLength()); // Back up the uncorrupted log File logFileBak = new File(testDir, logFile.getName() + ".bak"); @@ -204,8 +204,8 @@ public void testCountValidTransactions() throws IOException { truncateFile(logFile, txOffset); validation = EditLogFileInputStream.validateEditLog(logFile); assertEquals("Failed when truncating to length " + txOffset, - txid - 1, validation.numTransactions); - assertEquals(txOffset, validation.validLength); + txid - 1, validation.getNumTransactions()); + assertEquals(txOffset, validation.getValidLength()); // Restore backup, truncate the file with one byte in the txn, // also isn't valid @@ -213,24 +213,24 @@ public void testCountValidTransactions() throws IOException { truncateFile(logFile, txOffset + 1); validation = EditLogFileInputStream.validateEditLog(logFile); assertEquals("Failed when truncating to length " + (txOffset + 1), - txid - 1, validation.numTransactions); - assertEquals(txOffset, validation.validLength); + txid - 1, validation.getNumTransactions()); + assertEquals(txOffset, validation.getValidLength()); // Restore backup, corrupt the txn opcode Files.copy(logFileBak, logFile); corruptByteInFile(logFile, txOffset); validation = EditLogFileInputStream.validateEditLog(logFile); assertEquals("Failed when corrupting txn opcode at " + txOffset, - txid - 1, validation.numTransactions); - assertEquals(txOffset, validation.validLength); + txid - 1, validation.getNumTransactions()); + assertEquals(txOffset, validation.getValidLength()); // Restore backup, corrupt a byte a few bytes into the txn Files.copy(logFileBak, logFile); corruptByteInFile(logFile, txOffset+5); validation = EditLogFileInputStream.validateEditLog(logFile); assertEquals("Failed when corrupting txn data at " + (txOffset+5), - txid - 1, validation.numTransactions); - assertEquals(txOffset, validation.validLength); + txid - 1, validation.getNumTransactions()); + assertEquals(txOffset, validation.getValidLength()); } // Corrupt the log at every offset to make sure that validation itself @@ -241,8 +241,8 @@ public void testCountValidTransactions() throws IOException { Files.copy(logFileBak, logFile); corruptByteInFile(logFile, offset); EditLogValidation val = EditLogFileInputStream.validateEditLog(logFile); - assertTrue(val.numTransactions >= prevNumValid); - prevNumValid = val.numTransactions; + assertTrue(val.getNumTransactions() >= prevNumValid); + prevNumValid = val.getNumTransactions(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java index 113dcbc339..649c415287 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java @@ -36,9 +36,6 @@ import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile; import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile; -import 
org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.TransactionalLoadPlan; -import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.LogGroup; -import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.LoadPlan; import org.junit.Test; import org.mockito.Mockito; @@ -63,335 +60,14 @@ public void testCurrentStorageInspector() throws IOException { "/foo/current/" + getInProgressEditsFileName(457)); inspector.inspectDirectory(mockDir); - mockLogValidation(inspector, - "/foo/current/" + getInProgressEditsFileName(457), 10); - - assertEquals(2, inspector.foundEditLogs.size()); assertEquals(2, inspector.foundImages.size()); - assertTrue(inspector.foundEditLogs.get(1).isInProgress()); - + FSImageFile latestImage = inspector.getLatestImage(); assertEquals(456, latestImage.txId); assertSame(mockDir, latestImage.sd); assertTrue(inspector.isUpgradeFinalized()); - LoadPlan plan = inspector.createLoadPlan(); - LOG.info("Plan: " + plan); - assertEquals(new File("/foo/current/"+getImageFileName(456)), - plan.getImageFile()); - assertArrayEquals(new File[] { - new File("/foo/current/" + getInProgressEditsFileName(457)) }, - plan.getEditsFiles().toArray(new File[0])); - } - - /** - * Test that we check for gaps in txids when devising a load plan. - */ - @Test - public void testPlanWithGaps() throws IOException { - FSImageTransactionalStorageInspector inspector = - new FSImageTransactionalStorageInspector(); - - StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory( - NameNodeDirType.IMAGE_AND_EDITS, - false, - "/foo/current/" + getImageFileName(123), - "/foo/current/" + getImageFileName(456), - "/foo/current/" + getFinalizedEditsFileName(457,900), - "/foo/current/" + getFinalizedEditsFileName(901,950), - "/foo/current/" + getFinalizedEditsFileName(952,1000)); // <-- missing edit 951! 
- - inspector.inspectDirectory(mockDir); - try { - inspector.createLoadPlan(); - fail("Didn't throw IOE trying to load with gaps in edits"); - } catch (IOException ioe) { - assertTrue(ioe.getMessage().contains( - "would start at txid 951 but starts at txid 952")); - } - } - - /** - * Test the case where an in-progress log comes in the middle of a sequence - * of logs - */ - @Test - public void testPlanWithInProgressInMiddle() throws IOException { - FSImageTransactionalStorageInspector inspector = - new FSImageTransactionalStorageInspector(); - - StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory( - NameNodeDirType.IMAGE_AND_EDITS, - false, - "/foo/current/" + getImageFileName(123), - "/foo/current/" + getImageFileName(456), - "/foo/current/" + getFinalizedEditsFileName(457,900), - "/foo/current/" + getInProgressEditsFileName(901), // <-- inprogress in middle - "/foo/current/" + getFinalizedEditsFileName(952,1000)); - - inspector.inspectDirectory(mockDir); - mockLogValidation(inspector, - "/foo/current/" + getInProgressEditsFileName(901), 51); - - LoadPlan plan = inspector.createLoadPlan(); - LOG.info("Plan: " + plan); - - assertEquals(new File("/foo/current/" + getImageFileName(456)), - plan.getImageFile()); - assertArrayEquals(new File[] { - new File("/foo/current/" + getFinalizedEditsFileName(457,900)), - new File("/foo/current/" + getInProgressEditsFileName(901)), - new File("/foo/current/" + getFinalizedEditsFileName(952,1000)) }, - plan.getEditsFiles().toArray(new File[0])); - - } - - - /** - * Test case for the usual case where no recovery of a log group is necessary - * (i.e all logs have the same start and end txids and finalized) - */ - @Test - public void testLogGroupRecoveryNoop() throws IOException { - FSImageTransactionalStorageInspector inspector = - new FSImageTransactionalStorageInspector(); - - inspector.inspectDirectory( - mockDirectoryWithEditLogs("/foo1/current/" - + getFinalizedEditsFileName(123,456))); - inspector.inspectDirectory( - mockDirectoryWithEditLogs("/foo2/current/" - + getFinalizedEditsFileName(123,456))); - inspector.inspectDirectory( - mockDirectoryWithEditLogs("/foo3/current/" - + getFinalizedEditsFileName(123,456))); - LogGroup lg = inspector.logGroups.get(123L); - assertEquals(3, lg.logs.size()); - - lg.planRecovery(); - - assertFalse(lg.logs.get(0).isCorrupt()); - assertFalse(lg.logs.get(1).isCorrupt()); - assertFalse(lg.logs.get(2).isCorrupt()); - } - - /** - * Test case where we have some in-progress and some finalized logs - * for a given txid. - */ - @Test - public void testLogGroupRecoveryMixed() throws IOException { - FSImageTransactionalStorageInspector inspector = - new FSImageTransactionalStorageInspector(); - - inspector.inspectDirectory( - mockDirectoryWithEditLogs("/foo1/current/" - + getFinalizedEditsFileName(123,456))); - inspector.inspectDirectory( - mockDirectoryWithEditLogs("/foo2/current/" - + getFinalizedEditsFileName(123,456))); - inspector.inspectDirectory( - mockDirectoryWithEditLogs("/foo3/current/" - + getInProgressEditsFileName(123))); - inspector.inspectDirectory(FSImageTestUtil.mockStorageDirectory( - NameNodeDirType.IMAGE, - false, - "/foo4/current/" + getImageFileName(122))); - - LogGroup lg = inspector.logGroups.get(123L); - assertEquals(3, lg.logs.size()); - EditLogFile inProgressLog = lg.logs.get(2); - assertTrue(inProgressLog.isInProgress()); - - LoadPlan plan = inspector.createLoadPlan(); - - // Check that it was marked corrupt. 
- assertFalse(lg.logs.get(0).isCorrupt()); - assertFalse(lg.logs.get(1).isCorrupt()); - assertTrue(lg.logs.get(2).isCorrupt()); - - - // Calling recover should move it aside - inProgressLog = spy(inProgressLog); - Mockito.doNothing().when(inProgressLog).moveAsideCorruptFile(); - lg.logs.set(2, inProgressLog); - - plan.doRecovery(); - - Mockito.verify(inProgressLog).moveAsideCorruptFile(); - } - - /** - * Test case where we have finalized logs with different end txids - */ - @Test - public void testLogGroupRecoveryInconsistentEndTxIds() throws IOException { - FSImageTransactionalStorageInspector inspector = - new FSImageTransactionalStorageInspector(); - inspector.inspectDirectory( - mockDirectoryWithEditLogs("/foo1/current/" - + getFinalizedEditsFileName(123,456))); - inspector.inspectDirectory( - mockDirectoryWithEditLogs("/foo2/current/" - + getFinalizedEditsFileName(123,678))); - - LogGroup lg = inspector.logGroups.get(123L); - assertEquals(2, lg.logs.size()); - - try { - lg.planRecovery(); - fail("Didn't throw IOE on inconsistent end txids"); - } catch (IOException ioe) { - assertTrue(ioe.getMessage().contains("More than one ending txid")); - } - } - - /** - * Test case where we have only in-progress logs and need to synchronize - * based on valid length. - */ - @Test - public void testLogGroupRecoveryInProgress() throws IOException { - String paths[] = new String[] { - "/foo1/current/" + getInProgressEditsFileName(123), - "/foo2/current/" + getInProgressEditsFileName(123), - "/foo3/current/" + getInProgressEditsFileName(123) - }; - FSImageTransactionalStorageInspector inspector = - new FSImageTransactionalStorageInspector(); - inspector.inspectDirectory(mockDirectoryWithEditLogs(paths[0])); - inspector.inspectDirectory(mockDirectoryWithEditLogs(paths[1])); - inspector.inspectDirectory(mockDirectoryWithEditLogs(paths[2])); - - // Inject spies to return the valid counts we would like to see - mockLogValidation(inspector, paths[0], 2000); - mockLogValidation(inspector, paths[1], 2000); - mockLogValidation(inspector, paths[2], 1000); - - LogGroup lg = inspector.logGroups.get(123L); - assertEquals(3, lg.logs.size()); - - lg.planRecovery(); - - // Check that the short one was marked corrupt - assertFalse(lg.logs.get(0).isCorrupt()); - assertFalse(lg.logs.get(1).isCorrupt()); - assertTrue(lg.logs.get(2).isCorrupt()); - - // Calling recover should move it aside - EditLogFile badLog = lg.logs.get(2); - Mockito.doNothing().when(badLog).moveAsideCorruptFile(); - Mockito.doNothing().when(lg.logs.get(0)).finalizeLog(); - Mockito.doNothing().when(lg.logs.get(1)).finalizeLog(); - - lg.recover(); - - Mockito.verify(badLog).moveAsideCorruptFile(); - Mockito.verify(lg.logs.get(0)).finalizeLog(); - Mockito.verify(lg.logs.get(1)).finalizeLog(); - } - - /** - * Mock out the log at the given path to return a specified number - * of transactions upon validation. 
- */ - private void mockLogValidation( - FSImageTransactionalStorageInspector inspector, - String path, int numValidTransactions) throws IOException { - - for (LogGroup lg : inspector.logGroups.values()) { - List logs = lg.logs; - for (int i = 0; i < logs.size(); i++) { - EditLogFile log = logs.get(i); - if (log.getFile().getPath().equals(path)) { - // mock out its validation - EditLogFile spyLog = spy(log); - doReturn(new FSEditLogLoader.EditLogValidation(-1, numValidTransactions)) - .when(spyLog).validateLog(); - logs.set(i, spyLog); - return; - } - } - } - fail("No log found to mock out at " + path); - } - - /** - * Test when edits and image are in separate directories. - */ - @Test - public void testCurrentSplitEditsAndImage() throws IOException { - FSImageTransactionalStorageInspector inspector = - new FSImageTransactionalStorageInspector(); - - StorageDirectory mockImageDir = FSImageTestUtil.mockStorageDirectory( - NameNodeDirType.IMAGE, - false, - "/foo/current/" + getImageFileName(123)); - StorageDirectory mockImageDir2 = FSImageTestUtil.mockStorageDirectory( - NameNodeDirType.IMAGE, - false, - "/foo2/current/" + getImageFileName(456)); - StorageDirectory mockEditsDir = FSImageTestUtil.mockStorageDirectory( - NameNodeDirType.EDITS, - false, - "/foo3/current/" + getFinalizedEditsFileName(123, 456), - "/foo3/current/" + getInProgressEditsFileName(457)); - - inspector.inspectDirectory(mockImageDir); - inspector.inspectDirectory(mockEditsDir); - inspector.inspectDirectory(mockImageDir2); - - mockLogValidation(inspector, - "/foo3/current/" + getInProgressEditsFileName(457), 2); - - assertEquals(2, inspector.foundEditLogs.size()); - assertEquals(2, inspector.foundImages.size()); - assertTrue(inspector.foundEditLogs.get(1).isInProgress()); - assertTrue(inspector.isUpgradeFinalized()); - - // Check plan - TransactionalLoadPlan plan = - (TransactionalLoadPlan)inspector.createLoadPlan(); - FSImageFile pickedImage = plan.image; - assertEquals(456, pickedImage.txId); - assertSame(mockImageDir2, pickedImage.sd); - assertEquals(new File("/foo2/current/" + getImageFileName(456)), - plan.getImageFile()); - assertArrayEquals(new File[] { - new File("/foo3/current/" + getInProgressEditsFileName(457)) - }, plan.getEditsFiles().toArray(new File[0])); - } - - /** - * Test case where an in-progress log is in an earlier name directory - * than a finalized log. Previously, getEditLogManifest wouldn't - * see this log. - */ - @Test - public void testLogManifestInProgressComesFirst() throws IOException { - FSImageTransactionalStorageInspector inspector = - new FSImageTransactionalStorageInspector(); - inspector.inspectDirectory( - mockDirectoryWithEditLogs("/foo1/current/" - + getFinalizedEditsFileName(2622,2623), - "/foo1/current/" - + getFinalizedEditsFileName(2624,2625), - "/foo1/current/" - + getInProgressEditsFileName(2626))); - inspector.inspectDirectory( - mockDirectoryWithEditLogs("/foo2/current/" - + getFinalizedEditsFileName(2622,2623), - "/foo2/current/" - + getFinalizedEditsFileName(2624,2625), - "/foo2/current/" - + getFinalizedEditsFileName(2626,2627), - "/foo2/current/" - + getFinalizedEditsFileName(2628,2629))); - } - - static StorageDirectory mockDirectoryWithEditLogs(String... 
fileNames) { - return FSImageTestUtil.mockStorageDirectory(NameNodeDirType.EDITS, false, fileNames); + latestImage.getFile()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java index 748caf4d4c..d2f9781bed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java @@ -19,17 +19,277 @@ import static org.junit.Assert.*; -import java.io.IOException; +import java.net.URI; +import java.util.Collections; +import java.util.Arrays; +import java.util.List; +import java.util.ArrayList; +import java.util.Iterator; +import java.io.RandomAccessFile; +import java.io.File; +import java.io.FilenameFilter; +import java.io.BufferedInputStream; +import java.io.DataInputStream; +import java.io.IOException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.security.SecurityUtil; +import org.junit.Test; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Test; +import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.setupEdits; +import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec; +import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_ROLL; +import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_FAIL; +import com.google.common.collect.ImmutableList; import com.google.common.base.Joiner; +import java.util.zip.CheckedInputStream; +import java.util.zip.Checksum; + public class TestFileJournalManager { + /** + * Test the normal operation of loading transactions from + * file journal manager. 3 edits directories are setup without any + * failures. Test that we read in the expected number of transactions. + */ + @Test + public void testNormalOperation() throws IOException { + File f1 = new File(TestEditLog.TEST_DIR + "/normtest0"); + File f2 = new File(TestEditLog.TEST_DIR + "/normtest1"); + File f3 = new File(TestEditLog.TEST_DIR + "/normtest2"); + + List editUris = ImmutableList.of(f1.toURI(), f2.toURI(), f3.toURI()); + NNStorage storage = setupEdits(editUris, 5); + + long numJournals = 0; + for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) { + FileJournalManager jm = new FileJournalManager(sd); + assertEquals(6*TXNS_PER_ROLL, jm.getNumberOfTransactions(1)); + numJournals++; + } + assertEquals(3, numJournals); + } + + /** + * Test that inprogress files are handled correct. Set up a single + * edits directory. Fail on after the last roll. Then verify that the + * logs have the expected number of transactions. 
+ */ + @Test + public void testInprogressRecovery() throws IOException { + File f = new File(TestEditLog.TEST_DIR + "/filejournaltest0"); + // abort after the 5th roll + NNStorage storage = setupEdits(Collections.singletonList(f.toURI()), + 5, new AbortSpec(5, 0)); + StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next(); + + FileJournalManager jm = new FileJournalManager(sd); + assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, + jm.getNumberOfTransactions(1)); + } + + /** + * Test a mixture of inprogress files and finalised. Set up 3 edits + * directories and fail the second on the last roll. Verify that reading + * the transactions, reads from the finalised directories. + */ + @Test + public void testInprogressRecoveryMixed() throws IOException { + File f1 = new File(TestEditLog.TEST_DIR + "/mixtest0"); + File f2 = new File(TestEditLog.TEST_DIR + "/mixtest1"); + File f3 = new File(TestEditLog.TEST_DIR + "/mixtest2"); + + List editUris = ImmutableList.of(f1.toURI(), f2.toURI(), f3.toURI()); + + // abort after the 5th roll + NNStorage storage = setupEdits(editUris, + 5, new AbortSpec(5, 1)); + Iterator dirs = storage.dirIterator(NameNodeDirType.EDITS); + StorageDirectory sd = dirs.next(); + FileJournalManager jm = new FileJournalManager(sd); + assertEquals(6*TXNS_PER_ROLL, jm.getNumberOfTransactions(1)); + + sd = dirs.next(); + jm = new FileJournalManager(sd); + assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, jm.getNumberOfTransactions(1)); + + sd = dirs.next(); + jm = new FileJournalManager(sd); + assertEquals(6*TXNS_PER_ROLL, jm.getNumberOfTransactions(1)); + } + + /** + * Test that FileJournalManager behaves correctly despite inprogress + * files in all its edit log directories. Set up 3 directories and fail + * all on the last roll. Verify that the correct number of transaction + * are then loaded. + */ + @Test + public void testInprogressRecoveryAll() throws IOException { + File f1 = new File(TestEditLog.TEST_DIR + "/failalltest0"); + File f2 = new File(TestEditLog.TEST_DIR + "/failalltest1"); + File f3 = new File(TestEditLog.TEST_DIR + "/failalltest2"); + + List editUris = ImmutableList.of(f1.toURI(), f2.toURI(), f3.toURI()); + // abort after the 5th roll + NNStorage storage = setupEdits(editUris, 5, + new AbortSpec(5, 0), + new AbortSpec(5, 1), + new AbortSpec(5, 2)); + Iterator dirs = storage.dirIterator(NameNodeDirType.EDITS); + StorageDirectory sd = dirs.next(); + FileJournalManager jm = new FileJournalManager(sd); + assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, jm.getNumberOfTransactions(1)); + + sd = dirs.next(); + jm = new FileJournalManager(sd); + assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, jm.getNumberOfTransactions(1)); + + sd = dirs.next(); + jm = new FileJournalManager(sd); + assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, jm.getNumberOfTransactions(1)); + } + + /** + * Corrupt an edit log file after the start segment transaction + */ + private void corruptAfterStartSegment(File f) throws IOException { + RandomAccessFile raf = new RandomAccessFile(f, "rw"); + raf.seek(0x16); // skip version and first tranaction and a bit of next transaction + for (int i = 0; i < 1000; i++) { + raf.writeInt(0xdeadbeef); + } + raf.close(); + } + + /** + * Test that we can read from a stream created by FileJournalManager. + * Create a single edits directory, failing it on the final roll. + * Then try loading from the point of the 3rd roll. Verify that we read + * the correct number of transactions from this point. 
+ */ + @Test + public void testReadFromStream() throws IOException { + File f = new File(TestEditLog.TEST_DIR + "/filejournaltest1"); + // abort after 10th roll + NNStorage storage = setupEdits(Collections.singletonList(f.toURI()), + 10, new AbortSpec(10, 0)); + StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next(); + + FileJournalManager jm = new FileJournalManager(sd); + long expectedTotalTxnCount = TXNS_PER_ROLL*10 + TXNS_PER_FAIL; + assertEquals(expectedTotalTxnCount, jm.getNumberOfTransactions(1)); + + long skippedTxns = (3*TXNS_PER_ROLL); // skip first 3 files + long startingTxId = skippedTxns + 1; + + long numTransactionsToLoad = jm.getNumberOfTransactions(startingTxId); + long numLoaded = 0; + while (numLoaded < numTransactionsToLoad) { + EditLogInputStream editIn = jm.getInputStream(startingTxId); + FSEditLogLoader.EditLogValidation val = FSEditLogLoader.validateEditLog(editIn); + long count = val.getNumTransactions(); + + editIn.close(); + startingTxId += count; + numLoaded += count; + } + + assertEquals(expectedTotalTxnCount - skippedTxns, numLoaded); + } + + /** + * Try to make a request with a start transaction id which doesn't + * match the start ID of some log segment. + * This should fail as edit logs must currently be treated as indevisable + * units. + */ + @Test(expected=IOException.class) + public void testAskForTransactionsMidfile() throws IOException { + File f = new File(TestEditLog.TEST_DIR + "/filejournaltest2"); + NNStorage storage = setupEdits(Collections.singletonList(f.toURI()), + 10); + StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next(); + + FileJournalManager jm = new FileJournalManager(sd); + jm.getNumberOfTransactions(2); + } + + /** + * Test that we receive the correct number of transactions when we count + * the number of transactions around gaps. + * Set up a single edits directory, with no failures. Delete the 4th logfile. + * Test that getNumberOfTransactions returns the correct number of + * transactions before this gap and after this gap. Also verify that if you + * try to count on the gap that an exception is thrown. + */ + @Test + public void testManyLogsWithGaps() throws IOException { + File f = new File(TestEditLog.TEST_DIR + "/filejournaltest3"); + NNStorage storage = setupEdits(Collections.singletonList(f.toURI()), 10); + StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next(); + + final long startGapTxId = 3*TXNS_PER_ROLL + 1; + final long endGapTxId = 4*TXNS_PER_ROLL; + File[] files = new File(f, "current").listFiles(new FilenameFilter() { + public boolean accept(File dir, String name) { + if (name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId, endGapTxId))) { + return true; + } + return false; + } + }); + assertEquals(1, files.length); + assertTrue(files[0].delete()); + + FileJournalManager jm = new FileJournalManager(sd); + assertEquals(startGapTxId-1, jm.getNumberOfTransactions(1)); + + try { + jm.getNumberOfTransactions(startGapTxId); + fail("Should have thrown an exception by now"); + } catch (IOException ioe) { + assertTrue(true); + } + + // rolled 10 times so there should be 11 files. + assertEquals(11*TXNS_PER_ROLL - endGapTxId, + jm.getNumberOfTransactions(endGapTxId+1)); + } + + /** + * Test that we can load an edits directory with a corrupt inprogress file. + * The corrupt inprogress file should be moved to the side. 
+ */ + @Test + public void testManyLogsWithCorruptInprogress() throws IOException { + File f = new File(TestEditLog.TEST_DIR + "/filejournaltest5"); + NNStorage storage = setupEdits(Collections.singletonList(f.toURI()), 10, new AbortSpec(10, 0)); + StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next(); + + File[] files = new File(f, "current").listFiles(new FilenameFilter() { + public boolean accept(File dir, String name) { + if (name.startsWith("edits_inprogress")) { + return true; + } + return false; + } + }); + assertEquals(files.length, 1); + + corruptAfterStartSegment(files[0]); + + FileJournalManager jm = new FileJournalManager(sd); + assertEquals(10*TXNS_PER_ROLL+1, + jm.getNumberOfTransactions(1)); + } + @Test public void testGetRemoteEditLog() throws IOException { StorageDirectory sd = FSImageTestUtil.mockStorageDirectory( @@ -58,5 +318,4 @@ private static String getLogsAsString( FileJournalManager fjm, long firstTxId) throws IOException { return Joiner.on(",").join(fjm.getRemoteEditLogs(firstTxId)); } - } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index 647e47d61d..de55d88467 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.tools.DFSck; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -481,7 +482,7 @@ public void testFsckListCorruptFilesBlocks() throws Exception { } // wait for the namenode to see the corruption - final NameNode namenode = cluster.getNameNode(); + final NamenodeProtocols namenode = cluster.getNameNodeRpc(); CorruptFileBlocks corruptFileBlocks = namenode .listCorruptFileBlocks("/corruptData", null); int numCorrupt = corruptFileBlocks.getFiles().length; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java index ebd4a48ae2..65ec3b4ad2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.tools.DFSAdmin; import org.apache.hadoop.security.UserGroupInformation; import org.junit.After; @@ -52,7 +53,7 @@ public class TestHDFSConcat { private static final short REPL_FACTOR = 2; private MiniDFSCluster cluster; - private NameNode nn; + private NamenodeProtocols nn; private DistributedFileSystem dfs; private static long blockSize = 512; @@ -72,7 +73,7 @@ public void startUpCluster() throws IOException { cluster.waitClusterUp(); dfs = (DistributedFileSystem) cluster.getFileSystem(); 
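Note: the FileJournalManager tests added above count transactions per edits directory with getNumberOfTransactions. A compact sketch of that idiom, reusing the setupEdits helper this patch adds to TestEditLog; the class name and directory name below are illustrative, and the expected count follows testNormalOperation:

package org.apache.hadoop.hdfs.server.namenode; // same package as TestEditLog / TestFileJournalManager

import java.io.File;
import java.io.IOException;
import java.util.Collections;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;

class JournalTxnCountSketch {
  static long countFromTxnOne() throws IOException {
    // Hypothetical directory under TestEditLog.TEST_DIR; five rolls, no aborted journals.
    File dir = new File(TestEditLog.TEST_DIR, "journalcountsketch");
    NNStorage storage = TestEditLog.setupEdits(
        Collections.singletonList(dir.toURI()), 5);
    StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
    FileJournalManager jm = new FileJournalManager(sd);
    long txns = jm.getNumberOfTransactions(1); // expected: 6 * TestEditLog.TXNS_PER_ROLL
    storage.close();
    return txns;
  }
}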
assertNotNull("Failed to get FileSystem", dfs); - nn = cluster.getNameNode(); + nn = cluster.getNameNodeRpc(); assertNotNull("Failed to get NameNode", nn); } @@ -283,7 +284,7 @@ public void testConcatNotCompleteBlock() throws IOException { Path filePath1 = new Path(name1); DFSTestUtil.createFile(dfs, filePath1, trgFileLen, REPL_FACTOR, 1); - HdfsFileStatus fStatus = cluster.getNameNode().getFileInfo(name1); + HdfsFileStatus fStatus = nn.getFileInfo(name1); long fileLen = fStatus.getLen(); assertEquals(fileLen, trgFileLen); @@ -293,11 +294,11 @@ public void testConcatNotCompleteBlock() throws IOException { stm.readFully(0, byteFile1); stm.close(); - LocatedBlocks lb1 = cluster.getNameNode().getBlockLocations(name1, 0, trgFileLen); + LocatedBlocks lb1 = nn.getBlockLocations(name1, 0, trgFileLen); Path filePath2 = new Path(name2); DFSTestUtil.createFile(dfs, filePath2, srcFileLen, REPL_FACTOR, 1); - fStatus = cluster.getNameNode().getFileInfo(name2); + fStatus = nn.getFileInfo(name2); fileLen = fStatus.getLen(); assertEquals(srcFileLen, fileLen); @@ -307,7 +308,7 @@ public void testConcatNotCompleteBlock() throws IOException { stm.readFully(0, byteFile2); stm.close(); - LocatedBlocks lb2 = cluster.getNameNode().getBlockLocations(name2, 0, srcFileLen); + LocatedBlocks lb2 = nn.getBlockLocations(name2, 0, srcFileLen); System.out.println("trg len="+trgFileLen+"; src len="+srcFileLen); @@ -316,7 +317,7 @@ public void testConcatNotCompleteBlock() throws IOException { dfs.concat(filePath1, new Path [] {filePath2}); long totalLen = trgFileLen + srcFileLen; - fStatus = cluster.getNameNode().getFileInfo(name1); + fStatus = nn.getFileInfo(name1); fileLen = fStatus.getLen(); // read the resulting file @@ -325,7 +326,7 @@ public void testConcatNotCompleteBlock() throws IOException { stm.readFully(0, byteFileConcat); stm.close(); - LocatedBlocks lbConcat = cluster.getNameNode().getBlockLocations(name1, 0, fileLen); + LocatedBlocks lbConcat = nn.getBlockLocations(name1, 0, fileLen); //verifications // 1. number of blocks @@ -337,7 +338,7 @@ public void testConcatNotCompleteBlock() throws IOException { assertEquals(fileLen, totalLen); // 3. removal of the src file - fStatus = cluster.getNameNode().getFileInfo(name2); + fStatus = nn.getFileInfo(name2); assertNull("File "+name2+ "still exists", fStatus); // file shouldn't exist // 4. content diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java index 4005579386..c285c667aa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.util.StringUtils; /** @@ -147,8 +147,8 @@ public void testListCorruptFileBlocksInSafeMode() throws Exception { conf.setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY, 0f); cluster = new MiniDFSCluster.Builder(conf).waitSafeMode(false).build(); - cluster.getNameNode(). 
- setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE); + cluster.getNameNodeRpc(). + setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE); FileSystem fs = cluster.getFileSystem(); // create two files with one block each @@ -244,8 +244,8 @@ public void testListCorruptFileBlocksInSafeMode() throws Exception { cluster.getNameNode().isInSafeMode()); // now leave safe mode so that we can clean up - cluster.getNameNode(). - setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE); + cluster.getNameNodeRpc(). + setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE); util.cleanup(fs, "/srcdat10"); } catch (Exception e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java index 498e815733..aad8d7dc0a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.junit.Test; import com.google.common.base.Joiner; @@ -148,8 +148,8 @@ public void testPurgingWithNameEditsDirAfterFailure() private static void doSaveNamespace(NameNode nn) throws IOException { LOG.info("Saving namespace..."); - nn.setSafeMode(SafeModeAction.SAFEMODE_ENTER); - nn.saveNamespace(); - nn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); + nn.getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_ENTER); + nn.getRpcServer().saveNamespace(); + nn.getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_LEAVE); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java index 31a77778bf..b024bab1d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java @@ -20,6 +20,7 @@ import junit.framework.TestCase; import java.io.*; import java.util.Random; +import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -80,10 +81,12 @@ void checkImageAndEditsFilesExistence(File dir, assertTrue("Expect no images in " + dir, ins.foundImages.isEmpty()); } + List editlogs + = FileJournalManager.matchEditLogs(new File(dir, "current").listFiles()); if (shouldHaveEdits) { - assertTrue("Expect edits in " + dir, ins.foundEditLogs.size() > 0); + assertTrue("Expect edits in " + dir, editlogs.size() > 0); } else { - assertTrue("Expect no edits in " + dir, ins.foundEditLogs.isEmpty()); + assertTrue("Expect no edits in " + dir, editlogs.isEmpty()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java index 
2338f55666..6f2aada534 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.security.UserGroupInformation; import org.junit.After; import org.junit.Assert; @@ -54,7 +55,7 @@ public void tearDown() throws Exception { @Test public void testDelegationToken() throws IOException, InterruptedException { - NameNode nn = cluster.getNameNode(); + NamenodeProtocols nn = cluster.getNameNodeRpc(); HttpServletRequest request = mock(HttpServletRequest.class); UserGroupInformation ugi = UserGroupInformation.createRemoteUser("auser"); String tokenString = NamenodeJspHelper.getDelegationToken(nn, request, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java index 4719467f92..b62dcc1bd4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import java.util.Collections; @@ -108,7 +108,7 @@ public void testRestartDFS() throws Exception { files.cleanup(fs, dir); files.createFiles(fs, dir); fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER); - cluster.getNameNode().saveNamespace(); + cluster.getNameNodeRpc().saveNamespace(); final String checkAfterModify = checkImages(fsn, numNamenodeDirs); assertFalse("Modified namespace should change fsimage contents. 
" + "was: " + checkAfterRestart + " now: " + checkAfterModify, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java index 1f953bbf92..da2bf4e22e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java @@ -21,7 +21,7 @@ import java.io.IOException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java index c68f7eaab6..9365c6ef04 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java @@ -41,8 +41,8 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.log4j.Level; import org.junit.Test; @@ -79,7 +79,7 @@ public FaultySaveImage(boolean throwRTE) { public Void answer(InvocationOnMock invocation) throws Throwable { Object[] args = invocation.getArguments(); - StorageDirectory sd = (StorageDirectory)args[0]; + StorageDirectory sd = (StorageDirectory)args[1]; if (count++ == 1) { LOG.info("Injecting fault for sd: " + sd); @@ -106,7 +106,7 @@ private void saveNamespaceWithInjectedFault(Fault fault) throws Exception { Configuration conf = getConf(); NameNode.initMetrics(conf, NamenodeRole.NAMENODE); DFSTestUtil.formatNameNode(conf); - FSNamesystem fsn = new FSNamesystem(conf); + FSNamesystem fsn = FSNamesystem.loadFromDisk(conf); // Replace the FSImage with a spy FSImage originalImage = fsn.dir.fsImage; @@ -124,19 +124,22 @@ private void saveNamespaceWithInjectedFault(Fault fault) throws Exception { case SAVE_SECOND_FSIMAGE_RTE: // The spy throws a RuntimeException when writing to the second directory doAnswer(new FaultySaveImage(true)). - when(spyImage).saveFSImage((StorageDirectory)anyObject(), anyLong()); + when(spyImage).saveFSImage(Mockito.eq(fsn), + (StorageDirectory)anyObject(), anyLong()); shouldFail = false; break; case SAVE_SECOND_FSIMAGE_IOE: // The spy throws an IOException when writing to the second directory doAnswer(new FaultySaveImage(false)). 
- when(spyImage).saveFSImage((StorageDirectory)anyObject(), anyLong()); + when(spyImage).saveFSImage(Mockito.eq(fsn), + (StorageDirectory)anyObject(), anyLong()); shouldFail = false; break; case SAVE_ALL_FSIMAGES: // The spy throws IOException in all directories doThrow(new RuntimeException("Injected")). - when(spyImage).saveFSImage((StorageDirectory)anyObject(), anyLong()); + when(spyImage).saveFSImage(Mockito.eq(fsn), + (StorageDirectory)anyObject(), anyLong()); shouldFail = true; break; case WRITE_STORAGE_ALL: @@ -184,7 +187,7 @@ private void saveNamespaceWithInjectedFault(Fault fault) throws Exception { // Start a new namesystem, which should be able to recover // the namespace from the previous incarnation. - fsn = new FSNamesystem(conf); + fsn = FSNamesystem.loadFromDisk(conf); // Make sure the image loaded including our edits. checkEditExists(fsn, 1); @@ -209,7 +212,7 @@ public void testReinsertnamedirsInSavenamespace() throws Exception { NameNode.initMetrics(conf, NamenodeRole.NAMENODE); DFSTestUtil.formatNameNode(conf); - FSNamesystem fsn = new FSNamesystem(conf); + FSNamesystem fsn = FSNamesystem.loadFromDisk(conf); // Replace the FSImage with a spy FSImage originalImage = fsn.dir.fsImage; @@ -263,7 +266,7 @@ public void testReinsertnamedirsInSavenamespace() throws Exception { // Start a new namesystem, which should be able to recover // the namespace from the previous incarnation. LOG.info("Loading new FSmage from disk."); - fsn = new FSNamesystem(conf); + fsn = FSNamesystem.loadFromDisk(conf); // Make sure the image loaded including our edit. LOG.info("Checking reloaded image."); @@ -344,7 +347,7 @@ public void doTestFailedSaveNamespace(boolean restoreStorageAfterFailure) Configuration conf = getConf(); NameNode.initMetrics(conf, NamenodeRole.NAMENODE); DFSTestUtil.formatNameNode(conf); - FSNamesystem fsn = new FSNamesystem(conf); + FSNamesystem fsn = FSNamesystem.loadFromDisk(conf); // Replace the FSImage with a spy final FSImage originalImage = fsn.dir.fsImage; @@ -360,8 +363,9 @@ public void doTestFailedSaveNamespace(boolean restoreStorageAfterFailure) FSNamesystem.getNamespaceEditsDirs(conf)); doThrow(new IOException("Injected fault: saveFSImage")). - when(spyImage).saveFSImage((StorageDirectory)anyObject(), - Mockito.anyLong()); + when(spyImage).saveFSImage( + Mockito.eq(fsn), (StorageDirectory)anyObject(), + Mockito.anyLong()); try { doAnEdit(fsn, 1); @@ -390,7 +394,7 @@ public void doTestFailedSaveNamespace(boolean restoreStorageAfterFailure) // Start a new namesystem, which should be able to recover // the namespace from the previous incarnation. - fsn = new FSNamesystem(conf); + fsn = FSNamesystem.loadFromDisk(conf); // Make sure the image loaded including our edits. checkEditExists(fsn, 1); @@ -406,7 +410,7 @@ public void testSaveWhileEditsRolled() throws Exception { Configuration conf = getConf(); NameNode.initMetrics(conf, NamenodeRole.NAMENODE); DFSTestUtil.formatNameNode(conf); - FSNamesystem fsn = new FSNamesystem(conf); + FSNamesystem fsn = FSNamesystem.loadFromDisk(conf); try { doAnEdit(fsn, 1); @@ -425,7 +429,7 @@ public void testSaveWhileEditsRolled() throws Exception { // Start a new namesystem, which should be able to recover // the namespace from the previous incarnation. - fsn = new FSNamesystem(conf); + fsn = FSNamesystem.loadFromDisk(conf); // Make sure the image loaded including our edits. 
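Note: TestSaveNamespace now builds its namesystem with FSNamesystem.loadFromDisk and stubs the three-argument saveFSImage, which takes the namesystem as its first argument. A minimal sketch of that setup, assuming only the calls visible in these hunks; the class name is illustrative, and installing the spy back into fsn.dir.fsImage is left to the caller as in the test:

package org.apache.hadoop.hdfs.server.namenode; // same package as TestSaveNamespace

import static org.mockito.Matchers.anyObject;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.mockito.Mockito;

class SaveNamespaceFaultSketch {
  static FSNamesystem newNamesystem(Configuration conf) throws Exception {
    NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
    DFSTestUtil.formatNameNode(conf);
    return FSNamesystem.loadFromDisk(conf); // replaces "new FSNamesystem(conf)"
  }

  static FSImage stubFaultySaveImage(FSNamesystem fsn) throws IOException {
    FSImage spyImage = spy(fsn.dir.fsImage);
    // saveFSImage now takes the namesystem as its first argument.
    doThrow(new IOException("Injected fault: saveFSImage"))
        .when(spyImage).saveFSImage(Mockito.eq(fsn),
            (StorageDirectory) anyObject(), Mockito.anyLong());
    return spyImage; // the test installs this spy in place of the original FSImage
  }
}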
checkEditExists(fsn, 1); @@ -442,7 +446,7 @@ public void testTxIdPersistence() throws Exception { Configuration conf = getConf(); NameNode.initMetrics(conf, NamenodeRole.NAMENODE); DFSTestUtil.formatNameNode(conf); - FSNamesystem fsn = new FSNamesystem(conf); + FSNamesystem fsn = FSNamesystem.loadFromDisk(conf); try { // We have a BEGIN_LOG_SEGMENT txn to start @@ -464,7 +468,7 @@ public void testTxIdPersistence() throws Exception { assertEquals(5, fsn.getEditLog().getLastWrittenTxId()); fsn = null; - fsn = new FSNamesystem(conf); + fsn = FSNamesystem.loadFromDisk(conf); // 1 more txn to start new segment on restart assertEquals(6, fsn.getEditLog().getLastWrittenTxId()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java index 2734cd5db7..8948f7843e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption.IMPORT; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption.IMPORT; import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI; import java.io.File; @@ -46,11 +46,12 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; -import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.util.MD5FileUtils; import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.util.StringUtils; @@ -379,9 +380,10 @@ public void testCompression() throws IOException { NameNode namenode = new NameNode(conf); namenode.getNamesystem().mkdirs("/test", new PermissionStatus("hairong", null, FsPermission.getDefault()), true); - assertTrue(namenode.getFileInfo("/test").isDir()); - namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER); - namenode.saveNamespace(); + NamenodeProtocols nnRpc = namenode.getRpcServer(); + assertTrue(nnRpc.getFileInfo("/test").isDir()); + nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER); + nnRpc.saveNamespace(); namenode.stop(); namenode.join(); @@ -408,9 +410,10 @@ public void testCompression() throws IOException { private void checkNameSpace(Configuration conf) throws IOException { NameNode namenode = new NameNode(conf); - assertTrue(namenode.getFileInfo("/test").isDir()); - namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER); - namenode.saveNamespace(); + NamenodeProtocols nnRpc = namenode.getRpcServer(); + assertTrue(nnRpc.getFileInfo("/test").isDir()); + nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER); + nnRpc.saveNamespace(); namenode.stop(); namenode.join(); } @@ -515,7 +518,7 @@ public void testNNRestart() throws IOException, InterruptedException { 
cluster.waitActive(); cluster.restartNameNode(); - NameNode nn = cluster.getNameNode(); + NamenodeProtocols nn = cluster.getNameNodeRpc(); assertNotNull(nn); Assert.assertTrue(cluster.isDataNodeUp()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java index 8cbcb21839..1a40159bc9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java @@ -24,7 +24,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.junit.After; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java index 095c153fa2..11152883c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java @@ -339,7 +339,7 @@ public void testMultipleSecondaryCheckpoint() throws IOException { // Simulate a 2NN beginning a checkpoint, but not finishing. This will // cause name1 to be restored. 
- cluster.getNameNode().rollEditLog(); + cluster.getNameNodeRpc().rollEditLog(); printStorages(fsImage); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java index 94fa7c388a..45b3b02997 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java @@ -74,9 +74,9 @@ public void testFilesInGetListingOps() throws Exception { createFile("/tmp1/t2", 3200, (short)3); createFile("/tmp2/t1", 3200, (short)3); createFile("/tmp2/t2", 3200, (short)3); - cluster.getNameNode().getListing("/tmp1", HdfsFileStatus.EMPTY_NAME, false); + cluster.getNameNodeRpc().getListing("/tmp1", HdfsFileStatus.EMPTY_NAME, false); assertCounter("FilesInGetListingOps", 2L, getMetrics(NN_METRICS)); - cluster.getNameNode().getListing("/tmp2", HdfsFileStatus.EMPTY_NAME, false); + cluster.getNameNodeRpc().getListing("/tmp2", HdfsFileStatus.EMPTY_NAME, false); assertCounter("FilesInGetListingOps", 4L, getMetrics(NN_METRICS)); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java index 5fc96882f6..3c6adc2513 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java @@ -39,7 +39,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -124,12 +124,13 @@ private File initFsimage() throws IOException { } // Write results to the fsimage file - cluster.getNameNode().setSafeMode(SafeModeAction.SAFEMODE_ENTER); - cluster.getNameNode().saveNamespace(); + cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER); + cluster.getNameNodeRpc().saveNamespace(); // Determine location of fsimage file orig = FSImageTestUtil.findLatestImageFile( - cluster.getNameNode().getFSImage().getStorage().getStorageDir(0)); + FSImageTestUtil.getFSImage( + cluster.getNameNode()).getStorage().getStorageDir(0)); if (orig == null) { fail("Didn't generate or can't find fsimage"); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml index ca74e58d54..65fe23a022 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml @@ -1,17 +1,4 @@ - -38 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml index b79c0d077d..80ec80ecde 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml @@ -436,7 +436,7 @@ RegexpComparator - ^ls: `hdfs://\w+[.a-z]*:[0-9]+/file1': No such file or directory + ^ls: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file1': No such file or directory @@ -887,7 +887,7 @@ RegexpComparator - ^ls: `hdfs://\w+[.a-z]*:[0-9]+/file1': No such file or directory + ^ls: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file1': No such file or directory @@ -961,19 +961,19 @@ RegexpComparator - ^120\s+hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/data120bytes + ^120\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/data120bytes RegexpComparator - ^15\s+hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/data15bytes + ^15\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/data15bytes RegexpComparator - ^30\s+hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/data30bytes + ^30\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/data30bytes RegexpComparator - ^60\s+hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/data60bytes + ^60\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/data60bytes @@ -1030,19 +1030,19 @@ RegexpComparator - ^15( |\t)*hdfs://\w+[.a-z]*:[0-9]*/dir0/data15bytes + ^15( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/data15bytes RegexpComparator - ^30( |\t)*hdfs://\w+[.a-z]*:[0-9]*/dir0/data30bytes + ^30( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/data30bytes RegexpComparator - ^60( |\t)*hdfs://\w+[.a-z]*:[0-9]*/dir0/data60bytes + ^60( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/data60bytes RegexpComparator - ^120( |\t)*hdfs://\w+[.a-z]*:[0-9]*/dir0/data120bytes + ^120( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/data120bytes @@ -1079,19 +1079,19 @@ RegexpComparator - ^120\s+hdfs://\w+[.a-z]*:[0-9]*/data120bytes + ^120\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/data120bytes RegexpComparator - ^15\s+hdfs://\w+[.a-z]*:[0-9]*/data15bytes + ^15\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/data15bytes RegexpComparator - ^30\s+hdfs://\w+[.a-z]*:[0-9]*/data30bytes + ^30\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/data30bytes RegexpComparator - ^60\s+hdfs://\w+[.a-z]*:[0-9]*/data60bytes + ^60\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/data60bytes @@ -1153,19 +1153,19 @@ RegexpComparator - ^15( |\t)*hdfs://\w+[.a-z]*:[0-9]*/dir0/data15bytes + ^15( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/data15bytes RegexpComparator - ^30( |\t)*hdfs://\w+[.a-z]*:[0-9]*/dir0/data30bytes + ^30( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/data30bytes RegexpComparator - ^60( |\t)*hdfs://\w+[.a-z]*:[0-9]*/dir0/data60bytes + ^60( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/data60bytes RegexpComparator - ^120( |\t)*hdfs://\w+[.a-z]*:[0-9]*/dir0/data120bytes + ^120( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/data120bytes @@ -1182,7 +1182,7 @@ RegexpComparator - ^15( |\t)*hdfs://\w+[.a-z]*:[0-9]*/data15bytes + ^15( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]*/data15bytes @@ -1202,19 +1202,19 @@ RegexpComparator - ^15( |\t)*hdfs://\w+[.a-z]*:[0-9]*/data15bytes + ^15( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]*/data15bytes RegexpComparator - ^30( |\t)*hdfs://\w+[.a-z]*:[0-9]*/data30bytes + ^30( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]*/data30bytes RegexpComparator - ^60( |\t)*hdfs://\w+[.a-z]*:[0-9]*/data60bytes + ^60( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]*/data60bytes RegexpComparator - ^120( |\t)*hdfs://\w+[.a-z]*:[0-9]*/data120bytes + ^120( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]*/data120bytes @@ -1232,7 +1232,7 @@ RegexpComparator - ^15( |\t)*hdfs://\w+[.a-z]*:[0-9]*/dir0/data15bytes + ^15( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/data15bytes @@ -1253,19 +1253,19 @@ RegexpComparator - ^15( |\t)*hdfs://\w+[.a-z]*:[0-9]*/dir0/data15bytes + ^15( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/data15bytes RegexpComparator - ^30( 
|\t)*hdfs://\w+[.a-z]*:[0-9]*/dir0/data30bytes + ^30( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/data30bytes RegexpComparator - ^60( |\t)*hdfs://\w+[.a-z]*:[0-9]*/dir0/data60bytes + ^60( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/data60bytes RegexpComparator - ^120( |\t)*hdfs://\w+[.a-z]*:[0-9]*/dir0/data120bytes + ^120( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/data120bytes @@ -1368,7 +1368,7 @@ RegexpComparator - ^450\s+hdfs://\w+[.a-z]*:[0-9]*/dir0 + ^450\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0 @@ -1439,7 +1439,7 @@ RegexpComparator - ^450\s+hdfs://\w+[.a-z]*:[0-9]*/dir0 + ^450\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0 @@ -1471,7 +1471,7 @@ RegexpComparator - ^450\s+hdfs://\w+[.a-z]*:[0-9]*/dir0 + ^450\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0 @@ -1509,7 +1509,7 @@ RegexpComparator - ^450\s+hdfs://\w+[.a-z]*:[0-9]*/dir0 + ^450\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0 @@ -1909,7 +1909,7 @@ RegexpComparator - ^ls: `hdfs://\w+[.a-z]*:[0-9]+/file1': No such file or directory + ^ls: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file1': No such file or directory @@ -1924,7 +1924,7 @@ RegexpComparator - ^mv: `hdfs://\w+[.a-z]*:[0-9]+/file1': No such file or directory + ^mv: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file1': No such file or directory @@ -1958,7 +1958,7 @@ RegexpComparator - ^ls: `hdfs://\w+[.a-z]*:[0-9]+/file1': No such file or directory + ^ls: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file1': No such file or directory @@ -2002,7 +2002,7 @@ RegexpComparator - ^ls: `hdfs://\w+[.a-z]*:[0-9]+/file\*': No such file or directory + ^ls: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file\*': No such file or directory @@ -2936,7 +2936,7 @@ RegexpComparator - ^cp: `hdfs://\w+[.a-z]*:[0-9]+/file': No such file or directory + ^cp: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file': No such file or directory @@ -2957,7 +2957,7 @@ RegexpComparator - ^cp: `hdfs://\w+[.a-z]*:[0-9]+/file5': Is not a directory + ^cp: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file5': Is not a directory @@ -2978,7 +2978,7 @@ RegexpComparator - ^cp: `hdfs://\w+[.a-z]*:[0-9]+/file5': Is not a directory + ^cp: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file5': Is not a directory @@ -2998,7 +2998,7 @@ RegexpComparator - ^cp: `hdfs://\w+[.a-z]*:[0-9]+/dir': No such file or directory + ^cp: `hdfs://\w+[-.a-z0-9]*:[0-9]+/dir': No such file or directory @@ -3018,7 +3018,7 @@ RegexpComparator - ^cp: `hdfs://\w+[.a-z]*:[0-9]+/dir': No such file or directory + ^cp: `hdfs://\w+[-.a-z0-9]*:[0-9]+/dir': No such file or directory @@ -3159,19 +3159,19 @@ RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/dir0/file0 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/file0 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/dir0/file1 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/file1 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/dir0/file2 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/file2 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/dir0/file3 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/file3 @@ -3191,19 +3191,19 @@ RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file0 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file0 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file1 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file1 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file2 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file2 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file3 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file3 @@ -3306,19 +3306,19 @@ RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/file0 + ^Deleted 
hdfs://\w+[-.a-z0-9]*:[0-9]*/file0 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/file1 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/file1 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/file2 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/file2 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/file3 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/file3 @@ -3401,7 +3401,7 @@ RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/file0 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/file0 @@ -3421,19 +3421,19 @@ RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/file0 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/file0 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/file1 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/file1 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/file2 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/file2 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/file3 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/file3 @@ -3450,7 +3450,7 @@ RegexpComparator - rm: `hdfs://\w+[.a-z]*:[0-9]*/dir0': No such file or directory + rm: `hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0': No such file or directory @@ -3467,7 +3467,7 @@ RegexpComparator - ^rm: `hdfs://\w+[.a-z]*:[0-9]*/file0': No such file or directory + ^rm: `hdfs://\w+[-.a-z0-9]*:[0-9]*/file0': No such file or directory @@ -3487,19 +3487,19 @@ RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/file0 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/file0 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/file1 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/file1 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/file2 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/file2 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/file3 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/file3 @@ -3588,19 +3588,19 @@ RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/dir0 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/dir1 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/dir2 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/dir2 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/dir3 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/dir3 @@ -3620,19 +3620,19 @@ RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir0 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir0 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir1 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir1 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir2 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir2 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir3 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir3 @@ -3718,19 +3718,19 @@ RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/dir0 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/dir1 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/dir2 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/dir2 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/dir3 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/dir3 @@ -3795,7 +3795,7 @@ RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/file0 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/file0 @@ -3812,7 +3812,7 @@ RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/dir0 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0 @@ -3832,19 +3832,19 @@ RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/dir0 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0 RegexpComparator - ^Deleted 
hdfs://\w+[.a-z]*:[0-9]*/dir1 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/dir2 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/dir2 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/dir3 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/dir3 @@ -3860,7 +3860,7 @@ RegexpComparator - ^rm: `hdfs://\w+[.a-z]*:[0-9]*/file0': No such file or directory + ^rm: `hdfs://\w+[-.a-z0-9]*:[0-9]*/file0': No such file or directory @@ -3880,19 +3880,19 @@ RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/dir0 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/dir1 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/dir2 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/dir2 RegexpComparator - ^Deleted hdfs://\w+[.a-z]*:[0-9]*/dir3 + ^Deleted hdfs://\w+[-.a-z0-9]*:[0-9]*/dir3 @@ -4316,7 +4316,7 @@ RegexpComparator - ^15\s+hdfs://\w+[.a-z]*:[0-9]*/data15bytes + ^15\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/data15bytes @@ -4333,23 +4333,23 @@ RegexpComparator - ^120\s+hdfs://\w+[.a-z]*:[0-9]*/dir1/data/data120bytes + ^120\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1/data/data120bytes RegexpComparator - ^1065\s+hdfs://\w+[.a-z]*:[0-9]*/dir1/data/data1k + ^1065\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1/data/data1k RegexpComparator - ^15\s+hdfs://\w+[.a-z]*:[0-9]*/dir1/data/data15bytes + ^15\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1/data/data15bytes RegexpComparator - ^30\s+hdfs://\w+[.a-z]*:[0-9]*/dir1/data/data30bytes + ^30\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1/data/data30bytes RegexpComparator - ^60\s+hdfs://\w+[.a-z]*:[0-9]*/dir1/data/data60bytes + ^60\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1/data/data60bytes @@ -4367,11 +4367,11 @@ RegexpComparator - ^15\s+hdfs://\w+[.a-z]*:[0-9]*/dir0/data15bytes + ^15\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/data15bytes RegexpComparator - ^30\s+hdfs://\w+[.a-z]*:[0-9]*/dir0/data30bytes + ^30\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/data30bytes @@ -4404,7 +4404,7 @@ RegexpComparator - ^put: `hdfs://\w+[.a-z]*:[0-9]+/user/file0': File exists + ^put: `hdfs://\w+[-.a-z0-9]*:[0-9]+/user/file0': File exists @@ -4421,7 +4421,7 @@ RegexpComparator - ^put: `hdfs://\w+[.a-z]*:[0-9]+/file0': Is not a directory + ^put: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file0': Is not a directory @@ -4437,7 +4437,7 @@ RegexpComparator - ^put: `hdfs://\w+[.a-z]*:[0-9]+/wrongdir': No such file or directory + ^put: `hdfs://\w+[-.a-z0-9]*:[0-9]+/wrongdir': No such file or directory @@ -4839,7 +4839,7 @@ RegexpComparator - ^15\s+hdfs://\w+[.a-z]*:[0-9]*/data15bytes + ^15\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/data15bytes @@ -4859,7 +4859,7 @@ RegexpComparator - ^15\s+hdfs://\w+[.a-z]*:[0-9]*/dir0/dir1/data/data15bytes + ^15\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/dir1/data/data15bytes @@ -4877,11 +4877,11 @@ RegexpComparator - ^15\s+hdfs://\w+[.a-z]*:[0-9]*/dir0/data15bytes + ^15\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/data15bytes RegexpComparator - ^30\s+hdfs://\w+[.a-z]*:[0-9]*/dir0/data30bytes + ^30\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/data30bytes @@ -4914,7 +4914,7 @@ RegexpComparator - copyFromLocal: `hdfs://\w+[.a-z]*:[0-9]+/user/file0': File exists + copyFromLocal: `hdfs://\w+[-.a-z0-9]*:[0-9]+/user/file0': File exists @@ -4931,7 +4931,7 @@ RegexpComparator - ^copyFromLocal: `hdfs://\w+[.a-z]*:[0-9]+/file0': Is not a directory + ^copyFromLocal: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file0': Is not a directory @@ -4947,7 +4947,7 @@ RegexpComparator - ^copyFromLocal: `hdfs://\w+[.a-z]*:[0-9]+/wrongdir': No such file or directory + ^copyFromLocal: 
`hdfs://\w+[-.a-z0-9]*:[0-9]+/wrongdir': No such file or directory @@ -5365,7 +5365,7 @@ RegexpComparator - ^cat: `hdfs://\w+[.a-z]*:[0-9]*/file': No such file or directory + ^cat: `hdfs://\w+[-.a-z0-9]*:[0-9]*/file': No such file or directory @@ -5382,7 +5382,7 @@ RegexpComparator - ^cat: `hdfs://\w+[.a-z]*:[0-9]*/dir1': Is a directory + ^cat: `hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1': Is a directory @@ -5505,19 +5505,19 @@ RegexpComparator - ^0\s+hdfs://\w+[.a-z]*:[0-9]*/dir0 + ^0\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0 RegexpComparator - ^0\s+hdfs://\w+[.a-z]*:[0-9]*/dir1 + ^0\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 RegexpComparator - ^0\s+hdfs://\w+[.a-z]*:[0-9]*/dir2 + ^0\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir2 RegexpComparator - ^0\s+hdfs://\w+[.a-z]*:[0-9]*/dir3 + ^0\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir3 @@ -5537,19 +5537,19 @@ RegexpComparator - ^0\s+hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir0 + ^0\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir0 RegexpComparator - ^0\s+hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir1 + ^0\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir1 RegexpComparator - ^0\s+hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir2 + ^0\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir2 RegexpComparator - ^0\s+hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir3 + ^0\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir3 @@ -5617,19 +5617,19 @@ RegexpComparator - ^0\s+hdfs://\w+[.a-z]*:[0-9]*/dir0 + ^0\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0 RegexpComparator - ^0\s+hdfs://\w+[.a-z]*:[0-9]*/dir1 + ^0\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 RegexpComparator - ^0\s+hdfs://\w+[.a-z]*:[0-9]*/dir2 + ^0\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir2 RegexpComparator - ^0\s+hdfs://\w+[.a-z]*:[0-9]*/dir3 + ^0\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir3 @@ -5680,7 +5680,7 @@ RegexpComparator - ^0\s+hdfs://\w+[.a-z]*:[0-9]*/dir0 + ^0\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0 @@ -5697,19 +5697,19 @@ RegexpComparator - ^0\s+hdfs://\w+[.a-z]*:[0-9]*/dir0 + ^0\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0 RegexpComparator - ^0\s+hdfs://\w+[.a-z]*:[0-9]*/dir1 + ^0\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 RegexpComparator - ^0\s+hdfs://\w+[.a-z]*:[0-9]*/dir2 + ^0\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir2 RegexpComparator - ^0\s+hdfs://\w+[.a-z]*:[0-9]*/dir3 + ^0\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/dir3 @@ -5726,7 +5726,7 @@ RegexpComparator - mkdir: `hdfs://\w+[.a-z]*:[0-9]+/dir0': File exists + mkdir: `hdfs://\w+[-.a-z0-9]*:[0-9]+/dir0': File exists @@ -5743,7 +5743,7 @@ RegexpComparator - mkdir: `hdfs://\w+[.a-z]*:[0-9]+/data15bytes': Is not a directory + mkdir: `hdfs://\w+[-.a-z0-9]*:[0-9]+/data15bytes': Is not a directory @@ -5926,7 +5926,7 @@ RegexpComparator - ^Replication 2 set: hdfs://\w+[.a-z]*:[0-9]*/dir0/file0 + ^Replication 2 set: hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/file0 @@ -5944,11 +5944,11 @@ RegexpComparator - ^Replication 2 set: hdfs://\w+[.a-z]*:[0-9]*/dir0/file0 + ^Replication 2 set: hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/file0 RegexpComparator - ^Replication 2 set: hdfs://\w+[.a-z]*:[0-9]*/dir0/file1 + ^Replication 2 set: hdfs://\w+[-.a-z0-9]*:[0-9]*/dir0/file1 @@ -5964,7 +5964,7 @@ RegexpComparator - ^setrep: `hdfs://\w+[.a-z]*:[0-9]+/dir0/file': No such file or directory + ^setrep: `hdfs://\w+[-.a-z0-9]*:[0-9]+/dir0/file': No such file or directory @@ -6016,9 +6016,9 @@ RegexpComparator - ^0( |\t)*hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file0 - ^0( |\t)*hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file1 - ^0( |\t)*hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file2 + ^0( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file0 + ^0( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file1 + ^0( 
|\t)*hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file2 @@ -6069,9 +6069,9 @@ RegexpComparator - ^0( |\t)*hdfs://\w+[.a-z]*:[0-9]+/file0 - ^0( |\t)*hdfs://\w+[.a-z]*:[0-9]+/file1 - ^0( |\t)*hdfs://\w+[.a-z]*:[0-9]+/file2 + ^0( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]+/file0 + ^0( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]+/file1 + ^0( |\t)*hdfs://\w+[-.a-z0-9]*:[0-9]+/file2 @@ -6105,7 +6105,7 @@ RegexpComparator - ^0\s+hdfs://\w+[.a-z]*:[0-9]*/user/file0 + ^0\s+hdfs://\w+[-.a-z0-9]*:[0-9]*/user/file0 @@ -6122,9 +6122,9 @@ RegexpComparator - ^0\s+hdfs://\w+[.a-z]*:[0-9]+/file0 - ^0\s+hdfs://\w+[.a-z]*:[0-9]+/file1 - ^0\s+hdfs://\w+[.a-z]*:[0-9]+/file2 + ^0\s+hdfs://\w+[-.a-z0-9]*:[0-9]+/file0 + ^0\s+hdfs://\w+[-.a-z0-9]*:[0-9]+/file1 + ^0\s+hdfs://\w+[-.a-z0-9]*:[0-9]+/file2 @@ -6141,7 +6141,7 @@ RegexpComparator - touchz: `hdfs://\w+[.a-z]*:[0-9]+/data15bytes': Not a zero-length file + touchz: `hdfs://\w+[-.a-z0-9]*:[0-9]+/data15bytes': Not a zero-length file @@ -6254,7 +6254,7 @@ RegexpComparator - ^test: `hdfs://\w+[.a-z]*:[0-9]+/dir0/file': No such file or directory + ^test: `hdfs://\w+[-.a-z0-9]*:[0-9]+/dir0/file': No such file or directory @@ -6270,7 +6270,7 @@ RegexpComparator - ^test: `hdfs://\w+[.a-z]*:[0-9]+/dir': No such file or directory + ^test: `hdfs://\w+[-.a-z0-9]*:[0-9]+/dir': No such file or directory @@ -6618,7 +6618,7 @@ RegexpComparator - ^stat: `hdfs://\w+[.a-z]*:[0-9]+/file': No such file or directory + ^stat: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file': No such file or directory @@ -6863,7 +6863,7 @@ RegexpComparator - ^tail: `hdfs://\w+[.a-z]*:[0-9]+/file\*': No such file or directory + ^tail: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file\*': No such file or directory @@ -6878,7 +6878,7 @@ RegexpComparator - ^tail: `hdfs://\w+[.a-z]*:[0-9]+/file': No such file or directory + ^tail: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file': No such file or directory @@ -6895,7 +6895,7 @@ RegexpComparator - ^tail: `hdfs://\w+[.a-z]*:[0-9]+/dir1': Is a directory + ^tail: `hdfs://\w+[-.a-z0-9]*:[0-9]+/dir1': Is a directory @@ -6913,7 +6913,7 @@ RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file1 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file1 @@ -6930,7 +6930,7 @@ RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file1 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file1 @@ -6947,7 +6947,7 @@ RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir1 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 @@ -6964,7 +6964,7 @@ RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir1 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir1 @@ -7014,19 +7014,19 @@ RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file1 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file1 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file2 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file2 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file3 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file3 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file4 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file4 @@ -7046,19 +7046,19 @@ RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file1 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file1 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file2 + ( |\t)*0( 
|\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file2 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file3 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file3 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file4 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file4 @@ -7078,19 +7078,19 @@ RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file1 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file1 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file2 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file2 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file3 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file3 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file4 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file4 @@ -7110,19 +7110,19 @@ RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file1 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file1 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file2 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file2 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file3 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file3 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file4 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file4 @@ -7142,19 +7142,19 @@ RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir1 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir2 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir2 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir3 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir3 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir4 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir4 @@ -7174,19 +7174,19 @@ RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir1 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir1 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir2 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir2 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir3 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir3 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir4 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir4 @@ -7206,19 +7206,19 @@ RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir1 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir2 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir2 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir3 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir3 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir4 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir4 @@ -7238,19 +7238,19 @@ RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir1 + ( |\t)*1( |\t)*0( |\t)*0 
hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir1 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir2 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir2 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir3 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir3 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir4 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir4 @@ -7267,7 +7267,7 @@ RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file1 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file1 @@ -7284,7 +7284,7 @@ RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file1 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file1 @@ -7303,7 +7303,7 @@ RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir1 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 @@ -7322,7 +7322,7 @@ RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir1 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir1 @@ -7372,19 +7372,19 @@ RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file1 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file1 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file2 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file2 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file3 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file3 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file4 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file4 @@ -7404,19 +7404,19 @@ RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file1 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file1 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file2 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file2 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file3 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file3 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file4 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file4 @@ -7436,19 +7436,19 @@ RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( 
|\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file1 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file1 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file2 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file2 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file3 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file3 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file4 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file4 @@ -7468,19 +7468,19 @@ RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file1 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file1 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file2 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file2 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file3 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file3 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/file4 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/file4 @@ -7508,19 +7508,19 @@ RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir1 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir2 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir2 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir3 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir3 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir4 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir4 @@ -7548,19 +7548,19 @@ RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir1 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir1 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir2 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir2 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir3 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir3 RegexpComparator - ( |\t)*10( 
|\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir4 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir4 @@ -7588,19 +7588,19 @@ RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir1 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir2 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir2 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir3 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir3 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir4 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir4 @@ -7628,19 +7628,19 @@ RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir1 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir1 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir2 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir2 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir3 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir3 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/user/[a-z]*/dir4 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/user/[a-z]*/dir4 @@ -7657,7 +7657,7 @@ RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file1 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file1 @@ -7674,7 +7674,7 @@ RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir1 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 @@ -7709,19 +7709,19 @@ RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file1 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file1 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file2 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file2 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file3 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file3 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file4 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file4 @@ -7741,19 +7741,19 @@ RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file1 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file1 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file2 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file2 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file3 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file3 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 
hdfs://\w+[.a-z]*:[0-9]*/file4 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file4 @@ -7773,19 +7773,19 @@ RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir1 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir2 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir2 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir3 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir3 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir4 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir4 @@ -7805,19 +7805,19 @@ RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir1 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir2 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir2 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir3 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir3 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir4 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir4 @@ -7834,7 +7834,7 @@ RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file1 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file1 @@ -7853,7 +7853,7 @@ RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir1 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 @@ -7888,19 +7888,19 @@ RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file1 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file1 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file2 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file2 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file3 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file3 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file4 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file4 @@ -7920,19 +7920,19 @@ RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file1 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file1 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file2 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file2 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file3 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file3 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file4 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file4 @@ -7960,19 +7960,19 @@ 
RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir1 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir2 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir2 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir3 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir3 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir4 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir4 @@ -8000,19 +8000,19 @@ RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir1 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir2 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir2 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir3 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir3 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir4 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir4 @@ -8029,7 +8029,7 @@ RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file1 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file1 @@ -8046,7 +8046,7 @@ RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir1 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 @@ -8061,7 +8061,7 @@ RegexpComparator - count: `hdfs://\w+[.a-z]*:[0-9]+/file1': No such file or directory + count: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file1': No such file or directory @@ -8081,19 +8081,19 @@ RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file1 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file1 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file2 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file2 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file3 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file3 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file4 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file4 @@ -8113,19 +8113,19 @@ RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file1 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file1 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file2 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file2 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file3 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file3 RegexpComparator - ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file4 + ( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file4 @@ -8145,19 +8145,19 @@ RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 
hdfs://\w+[.a-z]*:[0-9]*/dir1 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir2 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir2 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir3 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir3 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir4 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir4 @@ -8177,19 +8177,19 @@ RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir1 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir2 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir2 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir3 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir3 RegexpComparator - ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir4 + ( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir4 @@ -8206,7 +8206,7 @@ RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file1 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file1 @@ -8225,7 +8225,7 @@ RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir1 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 @@ -8240,7 +8240,7 @@ RegexpComparator - count: `hdfs://\w+[.a-z]*:[0-9]+/file1': No such file or directory + count: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file1': No such file or directory @@ -8260,19 +8260,19 @@ RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file1 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file1 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file2 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file2 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file3 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file3 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file4 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file4 @@ -8292,19 +8292,19 @@ RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file1 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file1 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file2 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file2 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file3 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file3 RegexpComparator - ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/file4 + ( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/file4 @@ 
-8332,19 +8332,19 @@ RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir1 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir2 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir2 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir3 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir3 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir4 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir4 @@ -8372,19 +8372,19 @@ RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir1 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir1 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir2 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir2 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir3 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir3 RegexpComparator - ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[.a-z]*:[0-9]*/dir4 + ( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://\w+[-.a-z0-9]*:[0-9]*/dir4 @@ -11036,7 +11036,7 @@ RegexpComparator - ^chmod: `hdfs://\w+[.a-z]*:[0-9]+/file1': No such file or directory + ^chmod: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file1': No such file or directory @@ -11052,7 +11052,7 @@ RegexpComparator - ^chmod: `hdfs://\w+[.a-z]*:[0-9]+/file1': No such file or directory + ^chmod: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file1': No such file or directory @@ -13044,7 +13044,7 @@ RegexpComparator - ^chown: `hdfs://\w+[.a-z]*:[0-9]+/file1': No such file or directory + ^chown: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file1': No such file or directory @@ -14699,7 +14699,7 @@ RegexpComparator - ^chgrp: `hdfs://\w+[.a-z]*:[0-9]+/file1': No such file or directory + ^chgrp: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file1': No such file or directory @@ -15726,7 +15726,7 @@ RegexpComparator - Name: [0-9\.:]+ \([a-zA-z0-9\.]+\) + Name: [0-9\.:]+ \([-.a-zA-z0-9\.]+\) RegexpComparator @@ -15823,7 +15823,7 @@ RegexpComparator - Name: [0-9\.:]+ \([a-zA-z0-9\.]+\) + Name: [0-9\.:]+ \([-.a-zA-z0-9\.]+\) RegexpComparator @@ -15860,7 +15860,7 @@ RegexpComparator - Created file metafile on server hdfs:\/\/[a-zA-Z0-9\.:]+ + Created file metafile on server hdfs:\/\/[-.a-zA-Z0-9\.:]+ @@ -15896,19 +15896,19 @@ RegexpAcrossOutputComparator - ^Rack: \/rack1\s*127\.0\.0\.1:\d+\s\(localhost.*\)\s*127\.0\.0\.1:\d+\s\(localhost.*\) + ^Rack: \/rack1\s*127\.0\.0\.1:\d+\s\([-.a-zA-Z0-9]+\)\s*127\.0\.0\.1:\d+\s\([-.a-zA-Z0-9]+\) RegexpAcrossOutputComparator - Rack: \/rack2\s*127\.0\.0\.1:\d+\s\(localhost.*\)\s*127\.0\.0\.1:\d+\s\(localhost.*\)\s*127\.0\.0\.1:\d+\s\(localhost.*\) + Rack: \/rack2\s*127\.0\.0\.1:\d+\s\([-.a-zA-Z0-9]+\)\s*127\.0\.0\.1:\d+\s\([-.a-zA-Z0-9]+\)\s*127\.0\.0\.1:\d+\s\([-.a-zA-Z0-9]+\) 
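[Editorial note, not part of the patch] Every testHDFSConf.xml hunk in this section makes the same adjustment: the expected-output regexes are widened so host names containing hyphens and digits match (`[.a-z]*` becomes `[-.a-z0-9]*`), and the rack-report expectations stop hard-coding `localhost` in favour of the character class `[-.a-zA-Z0-9]+`. A minimal sketch of the effect, assuming an invented fully qualified host name with a hyphen and a digit:

// Hedged sketch: "h1-vm.example.org" is a made-up host; the two patterns are copied
// from the old and new expected-output entries for the du/dir0 test case.
import java.util.regex.Pattern;

public class HostPatternCheck {
  public static void main(String[] args) {
    String output = "15\thdfs://h1-vm.example.org:8020/dir0/data15bytes";

    // Pre-patch pattern: after the first \w+ label, only dots and lowercase letters
    // are allowed, so a host containing '-' or a digit never reaches the ':' and fails.
    Pattern oldPattern =
        Pattern.compile("^15\\s+hdfs://\\w+[.a-z]*:[0-9]*/dir0/data15bytes");

    // Post-patch pattern: hyphens and digits are accepted in the host name.
    Pattern newPattern =
        Pattern.compile("^15\\s+hdfs://\\w+[-.a-z0-9]*:[0-9]*/dir0/data15bytes");

    System.out.println("old matches: " + oldPattern.matcher(output).find()); // false
    System.out.println("new matches: " + newPattern.matcher(output).find()); // true
  }
}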
RegexpAcrossOutputComparator - Rack: \/rack3\s*127\.0\.0\.1:\d+\s\(localhost.*\) + Rack: \/rack3\s*127\.0\.0\.1:\d+\s\([-.a-zA-Z0-9]+\) RegexpAcrossOutputComparator - Rack: \/rack4\s*127\.0\.0\.1:\d+\s\(localhost.*\)\s*127\.0\.0\.1:\d+\s\(localhost.*\) + Rack: \/rack4\s*127\.0\.0\.1:\d+\s\([-.a-zA-Z0-9]+\)\s*127\.0\.0\.1:\d+\s\([-.a-zA-Z0-9]+\) @@ -16060,7 +16060,7 @@ RegexpComparator - ^moveFromLocal: `hdfs://\w+[.a-z]*:[0-9]+/file0': Is not a directory + ^moveFromLocal: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file0': Is not a directory @@ -16076,7 +16076,7 @@ RegexpComparator - ^moveFromLocal: `hdfs://\w+[.a-z]*:[0-9]+/wrongdir': No such file or directory + ^moveFromLocal: `hdfs://\w+[-.a-z0-9]*:[0-9]+/wrongdir': No such file or directory diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java index ac36029373..7dc5e86e68 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNode.BlockRecord; import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java index 11f695d8f4..ba76a7bd19 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java @@ -47,8 +47,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; -import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.test.GenericTestUtils; import org.apache.log4j.Level; import org.junit.After; @@ -84,7 +84,7 @@ public void startUp() throws IOException { FileSystem.setDefaultUri(conf, "hdfs://localhost:0"); conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); DFSTestUtil.formatNameNode(conf); - fsn = spy(new FSNamesystem(conf)); + fsn = spy(FSNamesystem.loadFromDisk(conf)); } /** @@ -159,7 +159,7 @@ public void testInternalReleaseLease_UNKNOWN_COMM () throws IOException { new PermissionStatus("test", "test", new FsPermission((short)0777)); mockFileBlocks(2, null, - HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false); + HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false); 
releaseLease(fsn, lm, file); fail("FSNamesystem.internalReleaseLease suppose to throw " + @@ -184,8 +184,8 @@ public void testInternalReleaseLease_COMM_COMM () throws IOException { PermissionStatus ps = new PermissionStatus("test", "test", new FsPermission((short)0777)); - mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED, - HdfsConstants.BlockUCState.COMMITTED, file, dnd, ps, false); + mockFileBlocks(2, HdfsServerConstants.BlockUCState.COMMITTED, + HdfsServerConstants.BlockUCState.COMMITTED, file, dnd, ps, false); releaseLease(fsn, lm, file); fail("FSNamesystem.internalReleaseLease suppose to throw " + @@ -232,7 +232,7 @@ public void testInternalReleaseLease_1blocks () throws IOException { PermissionStatus ps = new PermissionStatus("test", "test", new FsPermission((short)0777)); - mockFileBlocks(1, null, HdfsConstants.BlockUCState.COMMITTED, file, dnd, ps, false); + mockFileBlocks(1, null, HdfsServerConstants.BlockUCState.COMMITTED, file, dnd, ps, false); releaseLease(fsn, lm, file); fail("FSNamesystem.internalReleaseLease suppose to throw " + @@ -257,8 +257,8 @@ public void testInternalReleaseLease_COMM_CONSTRUCTION () throws IOException { PermissionStatus ps = new PermissionStatus("test", "test", new FsPermission((short)0777)); - mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED, - HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false); + mockFileBlocks(2, HdfsServerConstants.BlockUCState.COMMITTED, + HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false); assertFalse("False is expected in return in this case", releaseLease(fsn, lm, file)); @@ -278,8 +278,8 @@ public void testCommitBlockSynchronization_BlockNotFound () PermissionStatus ps = new PermissionStatus("test", "test", new FsPermission((short)0777)); - mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED, - HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false); + mockFileBlocks(2, HdfsServerConstants.BlockUCState.COMMITTED, + HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false); BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock(); try { @@ -304,8 +304,8 @@ public void testCommitBlockSynchronization_notUR () PermissionStatus ps = new PermissionStatus("test", "test", new FsPermission((short)0777)); - mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED, - HdfsConstants.BlockUCState.COMPLETE, file, dnd, ps, true); + mockFileBlocks(2, HdfsServerConstants.BlockUCState.COMMITTED, + HdfsServerConstants.BlockUCState.COMPLETE, file, dnd, ps, true); BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock(); when(lastBlock.isComplete()).thenReturn(true); @@ -332,8 +332,8 @@ public void testCommitBlockSynchronization_WrongGreaterRecoveryID() PermissionStatus ps = new PermissionStatus("test", "test", new FsPermission((short)0777)); - mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED, - HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, true); + mockFileBlocks(2, HdfsServerConstants.BlockUCState.COMMITTED, + HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, true); BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock(); when(((BlockInfoUnderConstruction)lastBlock).getBlockRecoveryId()).thenReturn(recoveryId-100); @@ -360,8 +360,8 @@ public void testCommitBlockSynchronization_WrongLesserRecoveryID() PermissionStatus ps = new PermissionStatus("test", "test", new FsPermission((short)0777)); - mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED, - 
HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, true); + mockFileBlocks(2, HdfsServerConstants.BlockUCState.COMMITTED, + HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, true); BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock(); when(((BlockInfoUnderConstruction)lastBlock).getBlockRecoveryId()).thenReturn(recoveryId+100); @@ -388,8 +388,8 @@ public void testCommitBlockSynchronization_EqualRecoveryID() PermissionStatus ps = new PermissionStatus("test", "test", new FsPermission((short)0777)); - mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED, - HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, true); + mockFileBlocks(2, HdfsServerConstants.BlockUCState.COMMITTED, + HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, true); BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock(); when(((BlockInfoUnderConstruction)lastBlock).getBlockRecoveryId()).thenReturn(recoveryId); @@ -407,8 +407,8 @@ public void testCommitBlockSynchronization_EqualRecoveryID() } private void mockFileBlocks(int fileBlocksNumber, - HdfsConstants.BlockUCState penUltState, - HdfsConstants.BlockUCState lastState, + HdfsServerConstants.BlockUCState penUltState, + HdfsServerConstants.BlockUCState lastState, Path file, DatanodeDescriptor dnd, PermissionStatus ps, boolean setStoredBlock) throws IOException { @@ -428,7 +428,6 @@ private void mockFileBlocks(int fileBlocksNumber, when(fsn.getFSImage()).thenReturn(fsImage); when(fsn.getFSImage().getEditLog()).thenReturn(editLog); - fsn.getFSImage().setFSNamesystem(fsn); switch (fileBlocksNumber) { case 0: diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 28ba0bc152..58fa824442 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -2,6 +2,10 @@ Hadoop MapReduce Change Log Trunk (unreleased changes) + IMPROVEMENTS + + MAPREDUCE-2887 due to HADOOP-7524 Change RPC to allow multiple protocols including multuple versions of the same protocol (sanjay Radia) + Release 0.23.0 - Unreleased INCOMPATIBLE CHANGES @@ -226,6 +230,24 @@ Release 0.23.0 - Unreleased MAPREDUCE-2701. app/Job.java needs UGI for the user that launched it. (Robert Evans via mahadev) + MAPREDUCE-2652. Enabled multiple NMs to be runnable on a single node by + making shuffle service port to be truely configurable. (Robert Evans via + vinodkv) + + MAPREDUCE-2735. Add an applications summary log to ResourceManager. + (Thomas Graves via acmurthy) + + MAPREDUCE-2697. Enhance CapacityScheduler to cap concurrently running + applications per-queue & per-user. (acmurthy) + Configuration changes: + add yarn.capacity-scheduler.maximum-am-resource-percent + + MAPREDUCE-2774. Add startup message to ResourceManager & NodeManager on + startup. (Venu Gopala Rao via acmurthy) + + MAPREDUCE-2655. Add audit logs to ResourceManager and NodeManager. (Thomas + Graves via acmurthy) + OPTIMIZATIONS MAPREDUCE-2026. Make JobTracker.getJobCounters() and @@ -652,7 +674,6 @@ Release 0.23.0 - Unreleased MAPREDUCE-279. Fix in MR-279 branch. Distributed cache bug fix to pass Terasort. (vinodkv) - MAPREDUCE-279. Fix in MR-279 branch. Fix null pointer exception in kill task attempt (mahadev) @@ -1155,6 +1176,51 @@ Release 0.23.0 - Unreleased MAPREDUCE-2885. Fix mapred-config.sh to look for hadoop-config.sh in HADOOP_COMMON_HOME/libexec. (acmurthy) + MAPREDUCE-2893. Remove duplicate entry of YarnClientProtocolProvider in + ClientProtocolProvider services file. 
(Liang-Chi Hsieh via acmurthy) + + MAPREDUCE-2891. Javadoc for AMRMProtocol and related records. (acmurthy) + + MAPREDUCE-2898. Javadoc for ContainerManager protocol and related records. + (acmurthy) + + MAPREDUCE-2904. Fixed bin/yarn to correctly include HDFS jars and + clean up of stale refs to pre-mavenized Hadoop Common and HDFS. + (Sharad Agarwal and Arun C. Murthy via acmurthy) + + MAPREDUCE-2737. Update the progress of jobs on client side. (Siddharth Seth + and Mahadev Konar via mahadev) + + MAPREDUCE-2886. Fix Javadoc warnings in MapReduce. (mahadev) + + MAPREDUCE-2897. Javadoc for ClientRMProtocol protocol and related records. + (acmurthy) + + MAPREDUCE-2916. Ivy build for MRv1 fails with bad organization for + common daemon. (mahadev) + + MAPREDUCE-2917. Fixed corner case in container reservation which led to + starvation and hung jobs. (acmurthy) + + MAPREDUCE-2756. Better error handling in JobControl for failed jobs. + (Robert Evans via acmurthy) + + MAPREDUCE-2716. MRReliabilityTest job fails because of missing + job-file. (Jeffrey Naisbitt via vinodkv) + + MAPREDUCE-2882. TestLineRecordReader depends on ant jars. (todd) + + MAPREDUCE-2687. Fix NodeManager to use the right version of + LocalDirAllocator.getLocalPathToWrite. (mahadev & acmurthy) + + MAPREDUCE-2800. Set final progress for tasks to ensure all task information + is correctly logged to JobHistory. (Siddharth Seth via acmurthy) + + MAPREDUCE-2938. Log application submission failure in CapacityScheduler. + (acmurthy) + + MAPREDUCE-2948. Hadoop streaming test failure, post MR-2767 (mahadev) + Release 0.22.0 - Unreleased INCOMPATIBLE CHANGES @@ -1179,6 +1245,8 @@ Release 0.22.0 - Unreleased MAPREDUCE-2169. Integrated Reed-Solomon code with RaidNode. (Ramkumar Vadali via schen) + MAPREDUCE-2936. Contrib Raid compilation broken after HDFS-1620. (vinodkv) + IMPROVEMENTS MAPREDUCE-2141. Add an "extra data" field to Task for use by Mesos. (matei) @@ -1727,6 +1795,8 @@ Release 0.22.0 - Unreleased MAPREDUCE-2571. CombineFileInputFormat.getSplits throws a java.lang.ArrayStoreException. (Bochun Bai via todd) + MAPREDUCE-2767. Remove Linux task-controller. 
(Milind Bhandarkar via shv) + Release 0.21.1 - Unreleased NEW FEATURES diff --git a/hadoop-mapreduce-project/build.xml b/hadoop-mapreduce-project/build.xml index 9acae22079..9d78196476 100644 --- a/hadoop-mapreduce-project/build.xml +++ b/hadoop-mapreduce-project/build.xml @@ -166,20 +166,6 @@ - - - - - - - - - @@ -710,8 +696,6 @@ - - - @@ -1277,7 +1260,6 @@ - @@ -1286,7 +1268,6 @@ - @@ -1769,7 +1750,6 @@ - @@ -1861,9 +1841,6 @@ - - - - - - - - - - @@ -2368,68 +2335,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java index 5f664d1516..422f6ff6b3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java @@ -44,6 +44,7 @@ import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.AMConstants; import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent; +import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.mapreduce.v2.app.job.Job; @@ -198,9 +199,13 @@ public void run() { // after "launching," send launched event to task attempt to move // state from ASSIGNED to RUNNING (also nukes "remoteTask", so must // do getRemoteTask() call first) + + //There is no port number because we are not really talking to a task + // tracker. The shuffle is just done through local files. So the + // port number is set to -1 in this case. context.getEventHandler().handle( - new TaskAttemptEvent(attemptID, - TaskAttemptEventType.TA_CONTAINER_LAUNCHED)); //FIXME: race condition here? or do we have same kind of lock on TA handler => MapTask can't send TA_UPDATE before TA_CONTAINER_LAUNCHED moves TA to RUNNING state? (probably latter) + new TaskAttemptContainerLaunchedEvent(attemptID, -1)); + //FIXME: race condition here? or do we have same kind of lock on TA handler => MapTask can't send TA_UPDATE before TA_CONTAINER_LAUNCHED moves TA to RUNNING state? (probably latter) if (numMapTasks == 0) { doneWithMaps = true; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttempt.java index bae1136246..7092b6dbc0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttempt.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttempt.java @@ -63,4 +63,9 @@ public interface TaskAttempt { * yet, returns 0. */ long getFinishTime(); + + /** + * @return the port shuffle is on. 
+ */ + public int getShufflePort(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptContainerLaunchedEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptContainerLaunchedEvent.java new file mode 100644 index 0000000000..68cb84ccc3 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptContainerLaunchedEvent.java @@ -0,0 +1,45 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.mapreduce.v2.app.job.event; + +import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; + +public class TaskAttemptContainerLaunchedEvent extends TaskAttemptEvent { + private int shufflePort; + + /** + * Create a new TaskAttemptEvent. + * @param id the id of the task attempt + * @param shufflePort the port that shuffle is listening on. + */ + public TaskAttemptContainerLaunchedEvent(TaskAttemptId id, int shufflePort) { + super(id, TaskAttemptEventType.TA_CONTAINER_LAUNCHED); + this.shufflePort = shufflePort; + } + + + /** + * Get the port that the shuffle handler is listening on. This is only + * valid if the type of the event is TA_CONTAINER_LAUNCHED + * @return the port the shuffle handler is listening on. + */ + public int getShufflePort() { + return shufflePort; + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptEvent.java index 41b6b3ef02..3a9851a617 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptEvent.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptEvent.java @@ -28,7 +28,12 @@ public class TaskAttemptEvent extends AbstractEvent { private TaskAttemptId attemptID; - + + /** + * Create a new TaskAttemptEvent. + * @param id the id of the task attempt + * @param type the type of event that happened. 
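Because TaskAttemptContainerLaunchedEvent keeps the TA_CONTAINER_LAUNCHED type of its TaskAttemptEvent parent, existing dispatchers route it unchanged; only the launched-container transition needs to downcast to pick up the port that is later exposed through TaskAttempt.getShufflePort(). A small sketch of both sides, using only the classes shown in this patch (the sketch class name and the caller-supplied TaskAttemptId are illustrative):

import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;

final class LaunchedEventSketch {
  // Producer side: report the port obtained from the NodeManager, or -1 for the local/uber case.
  static TaskAttemptEvent launched(TaskAttemptId attemptId, int port) {
    return new TaskAttemptContainerLaunchedEvent(attemptId, port);
  }

  // Consumer side: the TA_CONTAINER_LAUNCHED transition narrows the event to read the port.
  static int shufflePortOf(TaskAttemptEvent event) {
    return ((TaskAttemptContainerLaunchedEvent) event).getShufflePort();
  }
}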
+ */ public TaskAttemptEvent(TaskAttemptId id, TaskAttemptEventType type) { super(type); this.attemptID = id; @@ -37,5 +42,4 @@ public TaskAttemptEvent(TaskAttemptId id, TaskAttemptEventType type) { public TaskAttemptId getTaskAttemptID() { return attemptID; } - } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java index 302fd93751..c2a397502f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java @@ -599,6 +599,8 @@ public JobReport getReport() { report.setCleanupProgress(cleanupProgress); report.setMapProgress(computeProgress(mapTasks)); report.setReduceProgress(computeProgress(reduceTasks)); + report.setJobName(jobName); + report.setUser(username); return report; } finally { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java index 87e0e082d9..98cf9fbd9f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java @@ -43,7 +43,6 @@ import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.MapReduceChildJVM; -import org.apache.hadoop.mapred.ProgressSplitsBlock; import org.apache.hadoop.mapred.ShuffleHandler; import org.apache.hadoop.mapred.Task; import org.apache.hadoop.mapred.TaskAttemptContextImpl; @@ -65,7 +64,6 @@ import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier; import org.apache.hadoop.mapreduce.v2.MRConstants; import org.apache.hadoop.mapreduce.v2.api.records.Counter; -import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup; import org.apache.hadoop.mapreduce.v2.api.records.Counters; import org.apache.hadoop.mapreduce.v2.api.records.Phase; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; @@ -80,6 +78,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptFetchFailureEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent; +import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; @@ -126,7 +125,6 @@ /** * Implementation of TaskAttempt interface. 
*/ -@SuppressWarnings("all") public abstract class TaskAttemptImpl implements org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt, EventHandler { @@ -159,6 +157,7 @@ public abstract class TaskAttemptImpl implements private long launchTime; private long finishTime; private WrappedProgressSplitsBlock progressSplitBlock; + private int shufflePort = -1; private static final CleanupContainerTransition CLEANUP_CONTAINER_TRANSITION = new CleanupContainerTransition(); @@ -596,13 +595,10 @@ private ContainerLaunchContext createContainerLaunchContext() { // Add shuffle token LOG.info("Putting shuffle token in serviceData"); - DataOutputBuffer jobToken_dob = new DataOutputBuffer(); - jobToken.write(jobToken_dob); container .setServiceData( ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID, - ByteBuffer.wrap(jobToken_dob.getData(), 0, - jobToken_dob.getLength())); + ShuffleHandler.serializeServiceData(jobToken)); MRApps.addToClassPath(container.getAllEnv(), getInitialClasspath()); } catch (IOException e) { @@ -784,6 +780,17 @@ public long getFinishTime() { } } + + @Override + public int getShufflePort() { + readLock.lock(); + try { + return shufflePort; + } finally { + readLock.unlock(); + } + } + /**If container Assigned then return the node's address, otherwise null. */ @Override @@ -1153,7 +1160,11 @@ private static class LaunchedContainerTransition implements SingleArcTransition { @Override public void transition(TaskAttemptImpl taskAttempt, - TaskAttemptEvent event) { + TaskAttemptEvent evnt) { + + TaskAttemptContainerLaunchedEvent event = + (TaskAttemptContainerLaunchedEvent) evnt; + //set the launch time taskAttempt.launchTime = taskAttempt.clock.getTime(); // register it to TaskAttemptListener so that it start listening @@ -1186,6 +1197,7 @@ public void transition(TaskAttemptImpl taskAttempt, //make remoteTask reference as null as it is no more needed //and free up the memory taskAttempt.remoteTask = null; + taskAttempt.shufflePort = event.getShufflePort(); //tell the Task that attempt has started taskAttempt.eventHandler.handle(new TaskTAttemptEvent( @@ -1368,6 +1380,8 @@ public void transition(TaskAttemptImpl taskAttempt, // for it taskAttempt.taskAttemptListener.unregister( taskAttempt.attemptId, taskAttempt.jvmID); + taskAttempt.reportedStatus.progress = 1.0f; + taskAttempt.updateProgressSplits(); //send the cleanup event to containerLauncher taskAttempt.eventHandler.handle(new ContainerLauncherEvent( taskAttempt.attemptId, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java index f1e4c80cc3..cd2a540b97 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java @@ -559,8 +559,9 @@ private void handleTaskAttemptCompletion(TaskAttemptId attemptId, if (attempt.getNodeHttpAddress() != null) { TaskAttemptCompletionEvent tce = recordFactory.newRecordInstance(TaskAttemptCompletionEvent.class); tce.setEventId(-1); - //TODO: XXXXXX hardcoded port - tce.setMapOutputServerAddress("http://" + attempt.getNodeHttpAddress().split(":")[0] + ":8080"); + tce.setMapOutputServerAddress("http://" + + 
attempt.getNodeHttpAddress().split(":")[0] + ":" + + attempt.getShufflePort()); tce.setStatus(status); tce.setAttemptId(attempt.getID()); int runTime = 0; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java index 6ac96f5e53..bc6322cb0e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java @@ -19,6 +19,7 @@ package org.apache.hadoop.mapreduce.v2.app.launcher; import java.io.IOException; +import java.nio.ByteBuffer; import java.security.PrivilegedAction; import java.util.HashMap; import java.util.Map; @@ -30,11 +31,12 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.ShuffleHandler; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.app.AMConstants; import org.apache.hadoop.mapreduce.v2.app.AppContext; +import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; @@ -48,6 +50,7 @@ import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.ContainerManager; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; +import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse; import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; @@ -179,6 +182,7 @@ private class EventProcessor implements Runnable { this.event = event; } + @SuppressWarnings("unchecked") @Override public void run() { LOG.info("Processing the event " + event.toString()); @@ -208,15 +212,25 @@ public void run() { StartContainerRequest startRequest = recordFactory .newRecordInstance(StartContainerRequest.class); startRequest.setContainerLaunchContext(containerLaunchContext); - proxy.startContainer(startRequest); - - LOG.info("Returning from container-launch for " + taskAttemptID); + StartContainerResponse response = proxy.startContainer(startRequest); + ByteBuffer portInfo = response + .getServiceResponse(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID); + int port = -1; + if(portInfo != null) { + port = ShuffleHandler.deserializeMetaData(portInfo); + } + LOG.info("Shuffle port returned by ContainerManager for " + + taskAttemptID + " : " + port); + + if(port < 0) { + throw new IllegalStateException("Invalid shuffle port number " + + port + " returned for " + taskAttemptID); + } // after launching, send launched event to task attempt to move // it from ASSIGNED to RUNNING state context.getEventHandler().handle( - new TaskAttemptEvent(taskAttemptID, - 
TaskAttemptEventType.TA_CONTAINER_LAUNCHED)); + new TaskAttemptContainerLaunchedEvent(taskAttemptID, port)); } catch (Throwable t) { String message = "Container launch failed for " + containerID + " : " + StringUtils.stringifyException(t); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java index c1e19b9c31..073411c9b4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java @@ -42,6 +42,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.api.records.TaskState; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent; +import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent; @@ -295,8 +296,8 @@ else if (event.getType() == ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH) TaskAttemptId aId = ((ContainerRemoteLaunchEvent) event) .getTaskAttemptID(); TaskAttemptInfo attInfo = getTaskAttemptInfo(aId); - actualHandler.handle(new TaskAttemptEvent(aId, - TaskAttemptEventType.TA_CONTAINER_LAUNCHED)); + //TODO need to get the real port number MAPREDUCE-2666 + actualHandler.handle(new TaskAttemptContainerLaunchedEvent(aId, -1)); // send the status update event sendStatusUpdateEvent(aId, attInfo); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/TaskRuntimeEstimator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/TaskRuntimeEstimator.java index 93e5ae3d95..ce4825ff22 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/TaskRuntimeEstimator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/TaskRuntimeEstimator.java @@ -44,7 +44,7 @@ public interface TaskRuntimeEstimator { * already elapsed. If the projected total execution time for this task * ever exceeds its reasonable execution time, we may speculate it. * - * @param id the {@link TaskID} of the task we are asking about + * @param id the {@link TaskId} of the task we are asking about * @return the task's maximum reasonable runtime, or MAX_VALUE if * we don't have enough information to rule out any runtime, * however long. @@ -57,7 +57,7 @@ public interface TaskRuntimeEstimator { * Estimate a task attempt's total runtime. Includes the time already * elapsed. 
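Taken together with the ContainerLauncherImpl and TaskImpl hunks above, the shuffle port now travels through the container-start handshake instead of being assumed to be 8080. A hedged sketch of that flow, assuming the ShuffleHandler helpers behave as the patch shows (serializeServiceData writes the job token into the container's service data, deserializeMetaData reads the advertised port back out of the NodeManager's service response); the sketch class and method names are illustrative:

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.mapred.ShuffleHandler;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;

final class ShufflePortSketch {
  // Launcher side: the auxiliary shuffle service advertises its port in the
  // service response keyed by MAPREDUCE_SHUFFLE_SERVICEID.
  static int shufflePortFrom(StartContainerResponse response) throws IOException {
    ByteBuffer meta = response.getServiceResponse(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID);
    int port = (meta != null) ? ShuffleHandler.deserializeMetaData(meta) : -1;
    if (port < 0) {
      throw new IllegalStateException("Invalid shuffle port number " + port);
    }
    return port;
  }

  // TaskImpl side: the map-output server address is built from the reported
  // port rather than a hard-coded 8080.
  static String mapOutputAddress(String nodeHttpAddress, int shufflePort) {
    return "http://" + nodeHttpAddress.split(":")[0] + ":" + shufflePort;
  }
}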
* - * @param id the {@link TaskAttemptID} of the attempt we are asking about + * @param id the {@link TaskAttemptId} of the attempt we are asking about * @return our best estimate of the attempt's runtime, or {@code -1} if * we don't have enough information yet to produce an estimate. * @@ -69,7 +69,7 @@ public interface TaskRuntimeEstimator { * Estimates how long a new attempt on this task will take if we start * one now * - * @param id the {@link TaskID} of the task we are asking about + * @param id the {@link TaskId} of the task we are asking about * @return our best estimate of a new attempt's runtime, or {@code -1} if * we don't have enough information yet to produce an estimate. * @@ -79,9 +79,9 @@ public interface TaskRuntimeEstimator { /** * * Computes the width of the error band of our estimate of the task - * runtime as returned by {@link estimatedRuntime} + * runtime as returned by {@link #estimatedRuntime(TaskAttemptId)} * - * @param id the {@link TaskAttemptID} of the attempt we are asking about + * @param id the {@link TaskAttemptId} of the attempt we are asking about * @return our best estimate of the attempt's runtime, or {@code -1} if * we don't have enough information yet to produce an estimate. * diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java index a47ba3cfb8..1151b76610 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java @@ -52,6 +52,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent; +import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl; @@ -291,9 +292,11 @@ class MockContainerLauncher implements ContainerLauncher { public void handle(ContainerLauncherEvent event) { switch (event.getType()) { case CONTAINER_REMOTE_LAUNCH: + //We are running locally so set the shuffle port to -1 getContext().getEventHandler().handle( - new TaskAttemptEvent(event.getTaskAttemptID(), - TaskAttemptEventType.TA_CONTAINER_LAUNCHED)); + new TaskAttemptContainerLaunchedEvent(event.getTaskAttemptID(), + -1) + ); attemptLaunched(event.getTaskAttemptID()); break; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java index 321dd1d22f..ce160b8f13 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java @@ -25,6 +25,8 @@ import java.util.Iterator; 
import java.util.List; import java.util.Map; + +import org.apache.hadoop.mapred.ShuffleHandler; import org.apache.hadoop.mapreduce.FileSystemCounter; import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.JobCounter; @@ -200,6 +202,11 @@ public long getFinishTime() { return 0; } + @Override + public int getShufflePort() { + return ShuffleHandler.DEFAULT_SHUFFLE_PORT; + } + @Override public Counters getCounters() { return report.getCounters(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java index 7abf435ed0..37ef85858c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java @@ -515,6 +515,11 @@ public Counters getCounters() { throw new UnsupportedOperationException("Not supported yet."); } + @Override + public int getShufflePort() { + throw new UnsupportedOperationException("Not supported yet."); + } + private float getCodeRuntime() { int taskIndex = myAttemptID.getTaskId().getId(); int attemptIndex = myAttemptID.getId(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java index bb9d9d131d..a678e4660e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java @@ -22,6 +22,7 @@ import java.util.HashMap; import java.util.List; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapred.JobPriority; import org.apache.hadoop.mapred.TaskCompletionEvent; import org.apache.hadoop.mapreduce.JobStatus.State; @@ -39,6 +40,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.api.records.TaskState; import org.apache.hadoop.mapreduce.v2.api.records.TaskType; +import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; @@ -279,13 +281,13 @@ public static Counters toYarn(org.apache.hadoop.mapreduce.Counters counters) { public static org.apache.hadoop.mapred.JobStatus fromYarn( JobReport jobreport, String jobFile, String trackingUrl) { - String user = null, jobName = null; JobPriority jobPriority = JobPriority.NORMAL; return new org.apache.hadoop.mapred.JobStatus(fromYarn(jobreport.getJobId()), jobreport.getSetupProgress(), jobreport.getMapProgress(), jobreport.getReduceProgress(), jobreport.getCleanupProgress(), fromYarn(jobreport.getJobState()), - jobPriority, user, jobName, jobFile, trackingUrl); + jobPriority, jobreport.getUser(), jobreport.getJobName(), + jobFile, trackingUrl); } public static int fromYarn(JobState state) { @@ -395,45 +397,51 @@ public 
static TaskTrackerInfo[] fromYarnNodes(List nodes) { return taskTrackers.toArray(new TaskTrackerInfo[nodes.size()]); } - public static JobStatus fromYarn(ApplicationReport application) { + public static JobStatus fromYarn(ApplicationReport application, + String jobFile) { String trackingUrl = application.getTrackingUrl(); trackingUrl = trackingUrl == null ? "" : trackingUrl; - - JobStatus jobStatus = + JobStatus jobStatus = new JobStatus( - TypeConverter.fromYarn(application.getApplicationId()), - 0.0f, 0.0f, 0.0f, 0.0f, - TypeConverter.fromYarn(application.getState()), - org.apache.hadoop.mapreduce.JobPriority.NORMAL, - application.getUser(), application.getName(), - application.getQueue(), "", trackingUrl - ); + TypeConverter.fromYarn(application.getApplicationId()), + 0.0f, 0.0f, 0.0f, 0.0f, + TypeConverter.fromYarn(application.getState()), + org.apache.hadoop.mapreduce.JobPriority.NORMAL, + application.getUser(), application.getName(), + application.getQueue(), jobFile, trackingUrl + ); jobStatus.setSchedulingInfo(trackingUrl); // Set AM tracking url jobStatus.setStartTime(application.getStartTime()); return jobStatus; } - public static JobStatus[] fromYarnApps(List applications) { + public static JobStatus[] fromYarnApps(List applications, + Configuration conf) { List jobStatuses = new ArrayList(); for (ApplicationReport application : applications) { - jobStatuses.add(TypeConverter.fromYarn(application)); + // each applicationReport has its own jobFile + org.apache.hadoop.mapreduce.JobID jobId = + TypeConverter.fromYarn(application.getApplicationId()); + jobStatuses.add(TypeConverter.fromYarn(application, + MRApps.getJobFile(conf, application.getUser(), jobId))); } return jobStatuses.toArray(new JobStatus[jobStatuses.size()]); } public static QueueInfo fromYarn(org.apache.hadoop.yarn.api.records.QueueInfo - queueInfo) { + queueInfo, Configuration conf) { return new QueueInfo(queueInfo.getQueueName(), queueInfo.toString(), QueueState.RUNNING, - TypeConverter.fromYarnApps(queueInfo.getApplications())); + TypeConverter.fromYarnApps(queueInfo.getApplications(), conf)); } public static QueueInfo[] fromYarnQueueInfo( - List queues) { + List queues, + Configuration conf) { List queueInfos = new ArrayList(queues.size()); for (org.apache.hadoop.yarn.api.records.QueueInfo queue : queues) { - queueInfos.add(TypeConverter.fromYarn(queue)); + queueInfos.add(TypeConverter.fromYarn(queue, conf)); } return queueInfos.toArray(new QueueInfo[queueInfos.size()]); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java index 72b3a66cef..fb585e8dd2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java @@ -27,6 +27,8 @@ public interface JobReport { public abstract float getSetupProgress(); public abstract long getStartTime(); public abstract long getFinishTime(); + public abstract String getUser(); + public abstract String getJobName(); public abstract void setJobId(JobId jobId); public abstract void setJobState(JobState jobState); @@ -36,4 +38,6 @@ public interface JobReport { 
public abstract void setSetupProgress(float progress); public abstract void setStartTime(long startTime); public abstract void setFinishTime(long finishTime); + public abstract void setUser(String user); + public abstract void setJobName(String jobName); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java index 5e4c2e5b27..a4033e695f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java @@ -182,6 +182,30 @@ public void setFinishTime(long finishTime) { builder.setFinishTime((finishTime)); } + @Override + public String getUser() { + JobReportProtoOrBuilder p = viaProto ? proto : builder; + return (p.getUser()); + } + + @Override + public void setUser(String user) { + maybeInitBuilder(); + builder.setUser((user)); + } + + @Override + public String getJobName() { + JobReportProtoOrBuilder p = viaProto ? proto : builder; + return (p.getJobName()); + } + + @Override + public void setJobName(String jobName) { + maybeInitBuilder(); + builder.setJobName((jobName)); + } + private JobIdPBImpl convertFromProtoFormat(JobIdProto p) { return new JobIdPBImpl(p); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java index ae87f58d7b..ee3e60e77a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java @@ -127,7 +127,7 @@ public boolean accept(Path path) { /** * Checks whether the provided path string is a valid job history file. * @param pathString the path to be checked. - * @return + * @return true is the path is a valid job history filename else return false */ public static boolean isValidJobHistoryFileName(String pathString) { return pathString.endsWith(JOB_HISTORY_FILE_EXTENSION); @@ -148,7 +148,7 @@ public static JobID getJobIDFromHistoryFilePath(String pathString) throws IOExce /** * Gets a PathFilter which would match configuration files. - * @return + * @return the patch filter {@link PathFilter} for matching conf files. */ public static PathFilter getConfFileFilter() { return CONF_FILTER; @@ -156,7 +156,7 @@ public static PathFilter getConfFileFilter() { /** * Gets a PathFilter which would match job history file names. - * @return + * @return the path filter {@link PathFilter} matching job history files. */ public static PathFilter getHistoryFileFilter() { return JOB_HISTORY_FILE_FILTER; @@ -194,8 +194,8 @@ public static String getConfiguredHistoryIntermediateDoneDirPrefix( /** * Gets the configured directory prefix for Done history files. 
- * @param conf - * @return + * @param conf the configuration object + * @return the done history directory */ public static String getConfiguredHistoryServerDoneDirPrefix( Configuration conf) { @@ -209,8 +209,8 @@ public static String getConfiguredHistoryServerDoneDirPrefix( /** * Gets the user directory for intermediate done history files. - * @param conf - * @return + * @param conf the configuration object + * @return the intermediate done directory for jobhistory files. */ public static String getHistoryIntermediateDoneDirForUser(Configuration conf) throws IOException { return getConfiguredHistoryIntermediateDoneDirPrefix(conf) + File.separator @@ -262,7 +262,7 @@ public static String getIntermediateSummaryFileName(JobId jobId) { * @param logDir the log directory prefix. * @param jobId the jobId. * @param attempt attempt number for this job. - * @return + * @return the conf file path for jobs in progress. */ public static Path getStagingConfFile(Path logDir, JobId jobId, int attempt) { Path jobFilePath = null; @@ -277,7 +277,7 @@ public static Path getStagingConfFile(Path logDir, JobId jobId, int attempt) { * Gets the serial number part of the path based on the jobId and serialNumber format. * @param id * @param serialNumberFormat - * @return + * @return the serial number part of the patch based on the jobId and serial number format. */ public static String serialNumberDirectoryComponent(JobId id, String serialNumberFormat) { return String.format(serialNumberFormat, @@ -287,7 +287,7 @@ public static String serialNumberDirectoryComponent(JobId id, String serialNumbe /**Extracts the timstamp component from the path. * @param path - * @return + * @return the timestamp component from the path */ public static String getTimestampPartFromPath(String path) { Matcher matcher = TIMESTAMP_DIR_PATTERN.matcher(path); @@ -305,7 +305,7 @@ public static String getTimestampPartFromPath(String path) { * @param id * @param timestampComponent * @param serialNumberFormat - * @return + * @return the history sub directory based on the jobid, timestamp and serial number format */ public static String historyLogSubdirectory(JobId id, String timestampComponent, String serialNumberFormat) { // String result = LOG_VERSION_STRING; @@ -324,7 +324,7 @@ public static String historyLogSubdirectory(JobId id, String timestampComponent, * Gets the timestamp component based on millisecond time. * @param millisecondTime * @param debugMode - * @return + * @return the timestamp component based on millisecond time */ public static String timestampDirectoryComponent(long millisecondTime, boolean debugMode) { Calendar timestamp = Calendar.getInstance(); @@ -350,7 +350,7 @@ public static String doneSubdirsBeforeSerialTail() { /** * Computes a serial number used as part of directory naming for the given jobId. * @param id the jobId. 
- * @return + * @return the serial number used as part of directory naming for the given jobid */ public static int jobSerialNumber(JobId id) { return id.getId(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java index 988a502458..a6a5eddaf4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java @@ -218,7 +218,14 @@ public static void addToClassPath( private static final String STAGING_CONSTANT = ".staging"; public static Path getStagingAreaDir(Configuration conf, String user) { return new Path( - conf.get(MRConstants.APPS_STAGING_DIR_KEY) + + conf.get(MRConstants.APPS_STAGING_DIR_KEY) + Path.SEPARATOR + user + Path.SEPARATOR + STAGING_CONSTANT); } + + public static String getJobFile(Configuration conf, String user, + org.apache.hadoop.mapreduce.JobID jobId) { + Path jobFile = new Path(MRApps.getStagingAreaDir(conf, user), + jobId.toString() + Path.SEPARATOR + MRConstants.JOB_CONF_FILE); + return jobFile.toString(); + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto index 7383f54ea3..046d30d8ac 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto @@ -139,6 +139,8 @@ message JobReportProto { optional float setup_progress = 6; optional int64 start_time = 7; optional int64 finish_time = 8; + optional string user = 9; + optional string jobName = 10; } enum TaskAttemptCompletionEventStatusProto { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java index 16c3e0d1b2..bda7fb9d65 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java @@ -21,8 +21,11 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationState; +import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationReportPBImpl; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import org.junit.Test; public class TestTypeConverter { @@ -35,8 +38,33 @@ public void testFromYarn() throws Exception { applicationReport.setApplicationId(applicationId); applicationReport.setState(state); applicationReport.setStartTime(appStartTime); - JobStatus jobStatus = 
TypeConverter.fromYarn(applicationReport); + applicationReport.setUser("TestTypeConverter-user"); + JobStatus jobStatus = TypeConverter.fromYarn(applicationReport, "dummy-jobfile"); Assert.assertEquals(appStartTime, jobStatus.getStartTime()); Assert.assertEquals(state.toString(), jobStatus.getState().toString()); } + + @Test + public void testFromYarnApplicationReport() { + ApplicationId mockAppId = mock(ApplicationId.class); + when(mockAppId.getClusterTimestamp()).thenReturn(12345L); + when(mockAppId.getId()).thenReturn(6789); + + ApplicationReport mockReport = mock(ApplicationReport.class); + when(mockReport.getTrackingUrl()).thenReturn("dummy-tracking-url"); + when(mockReport.getApplicationId()).thenReturn(mockAppId); + when(mockReport.getState()).thenReturn(ApplicationState.KILLED); + when(mockReport.getUser()).thenReturn("dummy-user"); + when(mockReport.getQueue()).thenReturn("dummy-queue"); + String jobFile = "dummy-path/job.xml"; + JobStatus status = TypeConverter.fromYarn(mockReport, jobFile); + Assert.assertNotNull("fromYarn returned null status", status); + Assert.assertEquals("jobFile set incorrectly", "dummy-path/job.xml", status.getJobFile()); + Assert.assertEquals("queue set incorrectly", "dummy-queue", status.getQueue()); + Assert.assertEquals("trackingUrl set incorrectly", "dummy-tracking-url", status.getTrackingUrl()); + Assert.assertEquals("user set incorrectly", "dummy-user", status.getUsername()); + Assert.assertEquals("schedulingInfo set incorrectly", "dummy-tracking-url", status.getSchedulingInfo()); + Assert.assertEquals("jobId set incorrectly", 6789, status.getJobID().getId()); + Assert.assertEquals("state set incorrectly", JobStatus.State.KILLED, status.getState()); + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java index 2c9a701ed0..77fa446d58 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java @@ -18,10 +18,13 @@ package org.apache.hadoop.mapreduce.v2.util; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.api.records.TaskType; +import org.apache.hadoop.mapreduce.v2.MRConstants; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -107,4 +110,14 @@ public class TestMRApps { @Test(expected=YarnException.class) public void testTaskAttemptIDShort() { MRApps.toTaskAttemptID("attempt_0_0_0_m_0"); } + + @Test public void testGetJobFileWithUser() { + Configuration conf = new Configuration(); + conf.set(MRConstants.APPS_STAGING_DIR_KEY, "/my/path/to/staging"); + String jobFile = MRApps.getJobFile(conf, "dummy-user", new JobID("dummy-job", 12345)); + assertNotNull("getJobFile results in null.", jobFile); + assertEquals("jobFile with specified user is not as expected.", + 
"/my/path/to/staging/dummy-user/.staging/job_dummy-job_12345/job.xml", jobFile); + } + } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml index 83c1aae03e..be98eb7cdf 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml @@ -44,7 +44,7 @@ org.apache.avro avro-maven-plugin - 1.5.2 + 1.5.3 generate-sources diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java index 30183934e6..9d8a2a71da 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java @@ -372,7 +372,7 @@ protected Group newFileSystemGroup() { * @param id the id of the counter within the group (0 to N-1) * @param name the internal name of the counter * @return the counter for that name - * @deprecated use {@link findCounter(String, String)} instead + * @deprecated use {@link #findCounter(String, String)} instead */ @Deprecated public Counter findCounter(String group, int id, String name) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java index 4a96877873..194b80caf1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java @@ -49,7 +49,7 @@ /** * JobClient is the primary interface for the user-job to interact - * with the {@link JobTracker}. + * with the cluster. * * JobClient provides facilities to submit jobs, track their * progress, access component-tasks' reports/logs, get the Map-Reduce cluster @@ -72,7 +72,7 @@ * on the distributed file-system. * *
- * Submitting the job to the JobTracker and optionally monitoring + * Submitting the job to the cluster and optionally monitoring * it's status. *
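The reworded class javadoc above still walks through what JobClient provides: configuring a job, submitting it to the cluster, and optionally monitoring its status. Purely as a hedged illustration (not part of this patch), a minimal old-API driver using those facilities could look like the sketch below; WordCountMapper and WordCountReducer are hypothetical placeholder classes.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.FileInputFormat;
    import org.apache.hadoop.mapred.FileOutputFormat;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;

    public class WordCountDriver {
      public static void main(String[] args) throws Exception {
        JobConf job = new JobConf(WordCountDriver.class);
        job.setJobName("word-count");
        job.setMapperClass(WordCountMapper.class);     // hypothetical mapper
        job.setReducerClass(WordCountReducer.class);   // hypothetical reducer
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // Submits the job to the cluster and polls until completion -- the
        // "submit and optionally monitor" facility the javadoc describes.
        JobClient.runJob(job);
      }
    }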
    @@ -152,7 +152,7 @@ static class NetworkedJob implements RunningJob { /** * We store a JobProfile and a timestamp for when we last * acquired the job profile. If the job is null, then we cannot - * perform any of the tasks. The job might be null if the JobTracker + * perform any of the tasks. The job might be null if the cluster * has completely forgotten about the job. (eg, 24 hours after the * job completes.) */ @@ -348,7 +348,7 @@ public synchronized void killTask(String taskId, boolean shouldFail) throws IOEx } /** - * Fetch task completion events from jobtracker for this job. + * Fetch task completion events from cluster for this job. */ public synchronized TaskCompletionEvent[] getTaskCompletionEvents( int startFrom) throws IOException { @@ -429,7 +429,7 @@ public JobClient() { /** * Build a job client with the given {@link JobConf}, and connect to the - * default {@link JobTracker}. + * default cluster * * @param conf the job configuration. * @throws IOException @@ -440,7 +440,7 @@ public JobClient(JobConf conf) throws IOException { /** * Build a job client with the given {@link Configuration}, - * and connect to the default {@link JobTracker}. + * and connect to the default cluster * * @param conf the configuration. * @throws IOException @@ -450,7 +450,7 @@ public JobClient(Configuration conf) throws IOException { } /** - * Connect to the default {@link JobTracker}. + * Connect to the default cluster * @param conf the job configuration. * @throws IOException */ diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java index 5adf28968f..49d12d764d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java @@ -476,7 +476,6 @@ public String[] getLocalDirs() throws IOException { /** * Use MRAsyncDiskService.moveAndDeleteAllVolumes instead. 
- * @see org.apache.hadoop.mapreduce.util.MRAsyncDiskService#cleanupAllVolumes() */ @Deprecated public void deleteLocalFiles() throws IOException { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java index 97ebfa1dc6..01c0b1bba4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java @@ -1736,6 +1736,7 @@ private void mergeParts() throws IOException, InterruptedException, indexCacheList.get(0).writeToFile( mapOutputFile.getOutputIndexFileForWriteInVolume(filename[0]), job); } + sortPhase.complete(); return; } @@ -1776,6 +1777,7 @@ private void mergeParts() throws IOException, InterruptedException, } finally { finalOut.close(); } + sortPhase.complete(); return; } { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TIPStatus.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TIPStatus.java index 775fef27d9..da13934a6a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TIPStatus.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TIPStatus.java @@ -20,7 +20,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -/** The states of a {@link TaskInProgress} as seen by the JobTracker. +/** The states of a Tasks. 
*/ @InterfaceAudience.Private @InterfaceStability.Unstable diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java index 96c2bc53a1..e59bbc9bdf 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java @@ -1124,8 +1124,14 @@ public boolean monitorAndPrintJob() IntegerRanges reduceRanges = getProfileTaskRange(false); int progMonitorPollIntervalMillis = Job.getProgressPollInterval(clientConf); - while (!isComplete()) { - Thread.sleep(progMonitorPollIntervalMillis); + /* make sure to report full progress after the job is done */ + boolean reportedAfterCompletion = false; + while (!isComplete() || !reportedAfterCompletion) { + if (isComplete()) { + reportedAfterCompletion = true; + } else { + Thread.sleep(progMonitorPollIntervalMillis); + } String report = (" map " + StringUtils.formatPercent(mapProgress(), 0)+ " reduce " + diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobID.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobID.java index fc3cc6bb5a..09dc1cb6e1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobID.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobID.java @@ -43,8 +43,6 @@ * * @see TaskID * @see TaskAttemptID - * @see org.apache.hadoop.mapred.JobTracker#getNewJobId() - * @see org.apache.hadoop.mapred.JobTracker#getStartTime() */ @InterfaceAudience.Public @InterfaceStability.Stable diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java index 02251823f6..e52d52b9f1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java @@ -22,8 +22,7 @@ /** * Place holder for cluster level configuration keys. * - * These keys are used by both {@link JobTracker} and {@link TaskTracker}. The - * keys should have "mapreduce.cluster." as the prefix. + * The keys should have "mapreduce.cluster." as the prefix. 
* */ @InterfaceAudience.Private diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java index a7f5a4c118..b5f9482edf 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java @@ -23,6 +23,8 @@ import java.util.ArrayList; import java.util.List; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -47,6 +49,7 @@ @InterfaceAudience.Public @InterfaceStability.Evolving public class ControlledJob { + private static final Log LOG = LogFactory.getLog(ControlledJob.class); // A job will be in one of the following states public static enum State {SUCCESS, WAITING, RUNNING, READY, FAILED, @@ -235,6 +238,17 @@ public void killJob() throws IOException, InterruptedException { job.killJob(); } + public synchronized void failJob(String message) throws IOException, InterruptedException { + try { + if(job != null && this.state == State.RUNNING) { + job.killJob(); + } + } finally { + this.state = State.FAILED; + this.message = message; + } + } + /** * Check the state of this running job. The state may * remain the same, become SUCCESS or FAILED. @@ -322,6 +336,7 @@ protected synchronized void submit() { job.submit(); this.state = State.RUNNING; } catch (Exception ioe) { + LOG.info(getJobName()+" got an error while submitting ",ioe); this.state = State.FAILED; this.message = StringUtils.stringifyException(ioe); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java index 494e5e9dce..a2bc70aaf5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java @@ -21,13 +21,16 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collection; -import java.util.Hashtable; +import java.util.Iterator; +import java.util.LinkedList; import java.util.List; -import java.util.Map; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob.State; +import org.apache.hadoop.util.StringUtils; /** * This class encapsulates a set of MapReduce jobs and its dependency. 
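The JobControl rework in the hunks below replaces the per-state Hashtables with a single jobsInProgress list, so a usage sketch may help frame the changes. This is an illustration only and not part of the patch; job1Conf and job2Conf stand in for fully configured MapReduce job Configurations.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob;
    import org.apache.hadoop.mapreduce.lib.jobcontrol.JobControl;

    public class PipelineDriver {
      public static void main(String[] args) throws Exception {
        Configuration job1Conf = new Configuration();   // assumed to describe the first job
        Configuration job2Conf = new Configuration();   // assumed to describe the second job

        ControlledJob first = new ControlledJob(job1Conf);
        ControlledJob second = new ControlledJob(job2Conf);
        second.addDependingJob(first);                  // 'second' waits for 'first' to succeed

        JobControl control = new JobControl("example-group");
        control.addJob(first);
        control.addJob(second);

        // JobControl is a Runnable; drive it from its own thread and poll,
        // mirroring the 5-second polling loop in the rewritten run() below.
        new Thread(control).start();
        while (!control.allFinished()) {
          Thread.sleep(5000);
        }
        control.stop();
      }
    }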
@@ -49,17 +52,16 @@ @InterfaceAudience.Public @InterfaceStability.Evolving public class JobControl implements Runnable { + private static final Log LOG = LogFactory.getLog(JobControl.class); // The thread can be in one of the following state public static enum ThreadState {RUNNING, SUSPENDED,STOPPED, STOPPING, READY}; private ThreadState runnerState; // the thread state - private Map waitingJobs; - private Map readyJobs; - private Map runningJobs; - private Map successfulJobs; - private Map failedJobs; + private LinkedList jobsInProgress = new LinkedList(); + private LinkedList successfulJobs = new LinkedList(); + private LinkedList failedJobs = new LinkedList(); private long nextJobID; private String groupName; @@ -69,46 +71,51 @@ public static enum ThreadState {RUNNING, SUSPENDED,STOPPED, STOPPING, READY}; * @param groupName a name identifying this group */ public JobControl(String groupName) { - this.waitingJobs = new Hashtable(); - this.readyJobs = new Hashtable(); - this.runningJobs = new Hashtable(); - this.successfulJobs = new Hashtable(); - this.failedJobs = new Hashtable(); this.nextJobID = -1; this.groupName = groupName; this.runnerState = ThreadState.READY; } private static List toList( - Map jobs) { + LinkedList jobs) { ArrayList retv = new ArrayList(); synchronized (jobs) { - for (ControlledJob job : jobs.values()) { + for (ControlledJob job : jobs) { retv.add(job); } } return retv; } + synchronized private List getJobsIn(State state) { + LinkedList l = new LinkedList(); + for(ControlledJob j: jobsInProgress) { + if(j.getJobState() == state) { + l.add(j); + } + } + return l; + } + /** * @return the jobs in the waiting state */ public List getWaitingJobList() { - return toList(this.waitingJobs); + return getJobsIn(State.WAITING); } /** * @return the jobs in the running state */ public List getRunningJobList() { - return toList(this.runningJobs); + return getJobsIn(State.RUNNING); } /** * @return the jobs in the ready state */ public List getReadyJobsList() { - return toList(this.readyJobs); + return getJobsIn(State.READY); } /** @@ -126,34 +133,6 @@ private String getNextJobID() { nextJobID += 1; return this.groupName + this.nextJobID; } - - private static void addToQueue(ControlledJob aJob, - Map queue) { - synchronized(queue) { - queue.put(aJob.getJobID(), aJob); - } - } - - private void addToQueue(ControlledJob aJob) { - Map queue = getQueue(aJob.getJobState()); - addToQueue(aJob, queue); - } - - private Map getQueue(State state) { - Map retv = null; - if (state == State.WAITING) { - retv = this.waitingJobs; - } else if (state == State.READY) { - retv = this.readyJobs; - } else if (state == State.RUNNING) { - retv = this.runningJobs; - } else if (state == State.SUCCESS) { - retv = this.successfulJobs; - } else if (state == State.FAILED || state == State.DEPENDENT_FAILED) { - retv = this.failedJobs; - } - return retv; - } /** * Add a new job. 
@@ -163,7 +142,7 @@ synchronized public String addJob(ControlledJob aJob) { String id = this.getNextJobID(); aJob.setJobID(id); aJob.setJobState(State.WAITING); - this.addToQueue(aJob); + jobsInProgress.add(aJob); return id; } @@ -211,47 +190,8 @@ public void resume () { } } - synchronized private void checkRunningJobs() - throws IOException, InterruptedException { - - Map oldJobs = null; - oldJobs = this.runningJobs; - this.runningJobs = new Hashtable(); - - for (ControlledJob nextJob : oldJobs.values()) { - nextJob.checkState(); - this.addToQueue(nextJob); - } - } - - synchronized private void checkWaitingJobs() - throws IOException, InterruptedException { - Map oldJobs = null; - oldJobs = this.waitingJobs; - this.waitingJobs = new Hashtable(); - - for (ControlledJob nextJob : oldJobs.values()) { - nextJob.checkState(); - this.addToQueue(nextJob); - } - } - - synchronized private void startReadyJobs() { - Map oldJobs = null; - oldJobs = this.readyJobs; - this.readyJobs = new Hashtable(); - - for (ControlledJob nextJob : oldJobs.values()) { - //Submitting Job to Hadoop - nextJob.submit(); - this.addToQueue(nextJob); - } - } - synchronized public boolean allFinished() { - return this.waitingJobs.size() == 0 && - this.readyJobs.size() == 0 && - this.runningJobs.size() == 0; + return jobsInProgress.isEmpty(); } /** @@ -262,39 +202,83 @@ synchronized public boolean allFinished() { * Submit the jobs in ready state */ public void run() { - this.runnerState = ThreadState.RUNNING; - while (true) { - while (this.runnerState == ThreadState.SUSPENDED) { + try { + this.runnerState = ThreadState.RUNNING; + while (true) { + while (this.runnerState == ThreadState.SUSPENDED) { + try { + Thread.sleep(5000); + } + catch (Exception e) { + //TODO the thread was interrupted, do something!!! + } + } + + synchronized(this) { + Iterator it = jobsInProgress.iterator(); + while(it.hasNext()) { + ControlledJob j = it.next(); + LOG.debug("Checking state of job "+j); + switch(j.checkState()) { + case SUCCESS: + successfulJobs.add(j); + it.remove(); + break; + case FAILED: + case DEPENDENT_FAILED: + failedJobs.add(j); + it.remove(); + break; + case READY: + j.submit(); + break; + case RUNNING: + case WAITING: + //Do Nothing + break; + } + } + } + + if (this.runnerState != ThreadState.RUNNING && + this.runnerState != ThreadState.SUSPENDED) { + break; + } try { Thread.sleep(5000); } catch (Exception e) { - + //TODO the thread was interrupted, do something!!! + } + if (this.runnerState != ThreadState.RUNNING && + this.runnerState != ThreadState.SUSPENDED) { + break; } } - try { - checkRunningJobs(); - checkWaitingJobs(); - startReadyJobs(); - } catch (Exception e) { - this.runnerState = ThreadState.STOPPED; - } - if (this.runnerState != ThreadState.RUNNING && - this.runnerState != ThreadState.SUSPENDED) { - break; - } - try { - Thread.sleep(5000); - } - catch (Exception e) { - - } - if (this.runnerState != ThreadState.RUNNING && - this.runnerState != ThreadState.SUSPENDED) { - break; - } + }catch(Throwable t) { + LOG.error("Error while trying to run jobs.",t); + //Mark all jobs as failed because we got something bad. 
+ failAllJobs(t); } this.runnerState = ThreadState.STOPPED; } + synchronized private void failAllJobs(Throwable t) { + String message = "Unexpected System Error Occured: "+ + StringUtils.stringifyException(t); + Iterator it = jobsInProgress.iterator(); + while(it.hasNext()) { + ControlledJob j = it.next(); + try { + j.failJob(message); + } catch (IOException e) { + LOG.error("Error while tyring to clean up "+j.getJobName(), e); + } catch (InterruptedException e) { + LOG.error("Error while tyring to clean up "+j.getJobName(), e); + } finally { + failedJobs.add(j); + it.remove(); + } + } + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java new file mode 100644 index 0000000000..2098cd8d1b --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java @@ -0,0 +1,117 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce; + + +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyInt; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.LineNumberReader; +import java.io.StringReader; + +import junit.framework.TestCase; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.JobStatus.State; +import org.apache.hadoop.mapreduce.protocol.ClientProtocol; +import org.apache.log4j.Layout; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.apache.log4j.WriterAppender; +import org.junit.Before; +import org.junit.Test; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +/** + * Test to make sure that command line output for + * job monitoring is correct and prints 100% for map and reduce before + * successful completion. 
+ */ +public class TestJobMonitorAndPrint extends TestCase { + private Job job; + private Configuration conf; + private ClientProtocol clientProtocol; + + @Before + public void setUp() throws IOException { + conf = new Configuration(); + clientProtocol = mock(ClientProtocol.class); + Cluster cluster = mock(Cluster.class); + when(cluster.getConf()).thenReturn(conf); + when(cluster.getClient()).thenReturn(clientProtocol); + JobStatus jobStatus = new JobStatus(new JobID("job_000", 1), 0f, 0f, 0f, 0f, + State.RUNNING, JobPriority.HIGH, "tmp-user", "tmp-jobname", "tmp-jobfile", "tmp-url"); + job = new Job(cluster, jobStatus, conf); + job = spy(job); + } + + @Test + public void testJobMonitorAndPrint() throws Exception { + JobStatus jobStatus_1 = new JobStatus(new JobID("job_000", 1), 1f, 0.1f, 0.1f, 0f, + State.RUNNING, JobPriority.HIGH, "tmp-user", "tmp-jobname", "tmp-jobfile", "tmp-url"); + JobStatus jobStatus_2 = new JobStatus(new JobID("job_000", 1), 1f, 1f, 1f, 1f, + State.SUCCEEDED, JobPriority.HIGH, "tmp-user", "tmp-jobname", "tmp-jobfile", "tmp-url"); + + doAnswer( + new Answer() { + @Override + public TaskCompletionEvent[] answer(InvocationOnMock invocation) + throws Throwable { + return new TaskCompletionEvent[0]; + } + } + ).when(job).getTaskCompletionEvents(anyInt(), anyInt()); + + when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1, jobStatus_2); + // setup the logger to capture all logs + Layout layout = + Logger.getRootLogger().getAppender("stdout").getLayout(); + ByteArrayOutputStream os = new ByteArrayOutputStream(); + WriterAppender appender = new WriterAppender(layout, os); + appender.setThreshold(Level.ALL); + Logger qlogger = Logger.getLogger(Job.class); + qlogger.addAppender(appender); + + job.monitorAndPrintJob(); + + qlogger.removeAppender(appender); + LineNumberReader r = new LineNumberReader(new StringReader(os.toString())); + String line; + boolean foundHundred = false; + boolean foundComplete = false; + String match_1 = "map 100% reduce 100%"; + String match_2 = "completed successfully"; + while ((line = r.readLine()) != null) { + foundHundred = line.contains(match_1); + if (foundHundred) + break; + } + line = r.readLine(); + foundComplete = line.contains(match_2); + assertTrue(foundHundred); + assertTrue(foundComplete); + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java index 0f1b08547b..d29139f2df 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java @@ -90,7 +90,9 @@ public CompletedJob(Configuration conf, JobId jobId, Path historyFile, report.setJobState(JobState.valueOf(jobInfo.getJobStatus())); report.setStartTime(jobInfo.getLaunchTime()); report.setFinishTime(jobInfo.getFinishTime()); - //TOODO Possibly populate job progress. Never used. + report.setJobName(jobInfo.getJobname()); + report.setUser(jobInfo.getUsername()); + //TODO Possibly populate job progress. Never used. 
//report.setMapProgress(progress) //report.setReduceProgress(progress) } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java index 3759be511e..5f303440d0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java @@ -146,4 +146,10 @@ public long getLaunchTime() { public long getFinishTime() { return report.getFinishTime(); } + + @Override + public int getShufflePort() { + throw new UnsupportedOperationException("Not supported yet."); + } + } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java index 707217356e..4e865a39a4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.lang.reflect.Method; import java.security.PrivilegedAction; +import java.util.HashMap; import java.util.List; import org.apache.commons.logging.Log; @@ -50,7 +51,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.Counters; import org.apache.hadoop.mapreduce.v2.api.records.JobReport; import org.apache.hadoop.mapreduce.v2.api.records.JobState; -import org.apache.hadoop.mapreduce.v2.jobhistory.JHConfig; +import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.SecurityInfo; import org.apache.hadoop.security.UserGroupInformation; @@ -61,24 +62,20 @@ import org.apache.hadoop.yarn.api.records.ApplicationState; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; +import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier; import org.apache.hadoop.yarn.security.SchedulerSecurityInfo; -import org.apache.hadoop.yarn.security.client.ClientRMSecurityInfo; class ClientServiceDelegate { private static final Log LOG = LogFactory.getLog(ClientServiceDelegate.class); - private static final NotRunningJob NOTSTARTEDJOB = - new NotRunningJob(JobState.NEW); - - private static final NotRunningJob FAILEDJOB = - new NotRunningJob(JobState.FAILED); - - private static final NotRunningJob KILLEDJOB = - new NotRunningJob(JobState.KILLED); + + // Caches for per-user NotRunningJobs + private static HashMap> notRunningJobs = + new HashMap>(); private final Configuration conf; private final JobID jobId; @@ -101,6 +98,24 @@ class 
ClientServiceDelegate { this.appId = TypeConverter.toYarn(jobId).getAppId(); } + // Get the instance of the NotRunningJob corresponding to the specified + // user and state + private NotRunningJob getNotRunningJob(String user, JobState state) { + synchronized (notRunningJobs) { + HashMap map = notRunningJobs.get(state); + if (map == null) { + map = new HashMap(); + notRunningJobs.put(state, map); + } + NotRunningJob notRunningJob = map.get(user); + if (notRunningJob == null) { + notRunningJob = new NotRunningJob(user, state); + map.put(user, notRunningJob); + } + return notRunningJob; + } + } + private MRClientProtocol getProxy() throws YarnRemoteException { if (!forceRefresh && realProxy != null) { return realProxy; @@ -149,26 +164,30 @@ private MRClientProtocol getProxy() throws YarnRemoteException { } } - /** we just want to return if its allocating, so that we dont + /** we just want to return if its allocating, so that we don't * block on it. This is to be able to return job status - * on a allocating Application. + * on an allocating Application. */ + String user = application.getUser(); + if (user == null) { + throw new YarnRemoteExceptionPBImpl("User is not set in the application report"); + } if (application.getState() == ApplicationState.NEW || application.getState() == ApplicationState.SUBMITTED) { realProxy = null; - return NOTSTARTEDJOB; + return getNotRunningJob(user, JobState.NEW); } if (application.getState() == ApplicationState.FAILED) { realProxy = null; - return FAILEDJOB; + return getNotRunningJob(user, JobState.FAILED); } if (application.getState() == ApplicationState.KILLED) { - realProxy = null; - return KILLEDJOB; - } + realProxy = null; + return getNotRunningJob(user, JobState.KILLED); + } //History server can serve a job only if application //succeeded. 
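The getJobStatus() hunk below drops the hard-coded "yarn.apps.stagingDir" lookup and instead derives the job-file path from the report's user via the MRApps.getJobFile helper added earlier in this patch. A rough sketch of what that helper returns (all values here are made up):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.JobID;
    import org.apache.hadoop.mapreduce.v2.MRConstants;
    import org.apache.hadoop.mapreduce.v2.util.MRApps;

    public class JobFileExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set(MRConstants.APPS_STAGING_DIR_KEY, "/tmp/hadoop-yarn/staging");  // assumed staging root
        String jobFile = MRApps.getJobFile(conf, "alice", new JobID("1316000000000", 7));
        // jobFile is "<staging-dir>/<user>/.staging/<job-id>/job.xml", e.g.
        // /tmp/hadoop-yarn/staging/alice/.staging/job_1316000000000_0007/job.xml
        System.out.println(jobFile);
      }
    }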
@@ -270,17 +289,15 @@ String[] getTaskDiagnostics(org.apache.hadoop.mapreduce.TaskAttemptID arg0) return result; } - JobStatus getJobStatus(JobID oldJobID) throws YarnRemoteException, - YarnRemoteException { + JobStatus getJobStatus(JobID oldJobID) throws YarnRemoteException { org.apache.hadoop.mapreduce.v2.api.records.JobId jobId = TypeConverter.toYarn(oldJobID); - String stagingDir = conf.get("yarn.apps.stagingDir"); - String jobFile = stagingDir + "/" + jobId.toString(); - MRClientProtocol protocol; GetJobReportRequest request = recordFactory.newRecordInstance(GetJobReportRequest.class); request.setJobId(jobId); - JobReport report = ((GetJobReportResponse) invoke("getJobReport", + JobReport report = ((GetJobReportResponse) invoke("getJobReport", GetJobReportRequest.class, request)).getJobReport(); + String jobFile = MRApps.getJobFile(conf, report.getUser(), oldJobID); + //TODO: add tracking url in JobReport return TypeConverter.fromYarn(report, jobFile, ""); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java index 3fa01eb5f4..a40fcedda3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java @@ -63,8 +63,10 @@ public class NotRunningJob implements MRClientProtocol { RecordFactoryProvider.getRecordFactory(null); private final JobState jobState; + private final String user; - NotRunningJob(JobState jobState) { + NotRunningJob(String username, JobState jobState) { + this.user = username; this.jobState = jobState; } @@ -104,7 +106,10 @@ public GetJobReportResponse getJobReport(GetJobReportRequest request) JobReport jobReport = recordFactory.newRecordInstance(JobReport.class); jobReport.setJobId(request.getJobId()); - jobReport.setJobState(jobState); + jobReport.setJobState(this.jobState); + + jobReport.setUser(this.user); + // TODO: Add jobName & other job information that is available resp.setJobReport(jobReport); return resp; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java index 0459009aec..ac606c0330 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java @@ -120,7 +120,7 @@ public JobStatus[] getAllJobs() throws IOException, InterruptedException { recordFactory.newRecordInstance(GetAllApplicationsRequest.class); GetAllApplicationsResponse response = applicationsManager.getAllApplications(request); - return TypeConverter.fromYarnApps(response.getApplicationList()); + return TypeConverter.fromYarnApps(response.getApplicationList(), this.conf); } @@ -182,7 +182,7 @@ public QueueInfo getQueue(String queueName) throws IOException, getQueueInfoRequest(queueName, true, false, false); 
recordFactory.newRecordInstance(GetQueueInfoRequest.class); return TypeConverter.fromYarn( - applicationsManager.getQueueInfo(request).getQueueInfo()); + applicationsManager.getQueueInfo(request).getQueueInfo(), this.conf); } private void getChildQueues(org.apache.hadoop.yarn.api.records.QueueInfo parent, @@ -216,7 +216,7 @@ public QueueInfo[] getQueues() throws IOException, InterruptedException { getQueueInfoRequest(ROOT, false, true, true)).getQueueInfo(); getChildQueues(rootQueue, queues); - return TypeConverter.fromYarnQueueInfo(queues); + return TypeConverter.fromYarnQueueInfo(queues, this.conf); } @@ -229,7 +229,7 @@ public QueueInfo[] getRootQueues() throws IOException, InterruptedException { getQueueInfoRequest(ROOT, false, true, false)).getQueueInfo(); getChildQueues(rootQueue, queues); - return TypeConverter.fromYarnQueueInfo(queues); + return TypeConverter.fromYarnQueueInfo(queues, this.conf); } public QueueInfo[] getChildQueues(String parent) throws IOException, @@ -242,7 +242,7 @@ public QueueInfo[] getChildQueues(String parent) throws IOException, getQueueInfoRequest(parent, false, true, false)).getQueueInfo(); getChildQueues(parentQueue, queues); - return TypeConverter.fromYarnQueueInfo(queues); + return TypeConverter.fromYarnQueueInfo(queues, this.conf); } public String getStagingAreaDir() throws IOException, InterruptedException { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider index b4fd93041a..cc406ee7fe 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider @@ -12,17 +12,3 @@ # limitations under the License. # org.apache.hadoop.mapred.YarnClientProtocolProvider -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -org.apache.hadoop.mapred.YarnClientProtocolProvider \ No newline at end of file diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java index 8c97ccc855..24df9be59b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java @@ -268,6 +268,7 @@ public GetApplicationReportResponse getApplicationReport( String[] split = AMHOSTADDRESS.split(":"); application.setHost(split[0]); application.setRpcPort(Integer.parseInt(split[1])); + application.setUser("TestClientRedirect-user"); GetApplicationReportResponse response = recordFactory .newRecordInstance(GetApplicationReportResponse.class); response.setApplicationReport(application); @@ -397,6 +398,11 @@ public GetJobReportResponse getJobReport(GetJobReportRequest request) JobReport jobReport = recordFactory.newRecordInstance(JobReport.class); jobReport.setJobId(request.getJobId()); jobReport.setJobState(JobState.RUNNING); + jobReport.setJobName("TestClientRedirect-jobname"); + jobReport.setUser("TestClientRedirect-user"); + jobReport.setStartTime(0L); + jobReport.setFinishTime(1L); + GetJobReportResponse response = recordFactory .newRecordInstance(GetJobReportResponse.class); response.setJobReport(jobReport); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java index 4155858058..efe8c3acb7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java @@ -72,6 +72,8 @@ public void init(Configuration conf) { conf.setClass(String.format(AuxServices.AUX_SERVICE_CLASS_FMT, ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID), ShuffleHandler.class, Service.class); + // Non-standard shuffle port + conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 8083); conf.setClass(NMConfig.NM_CONTAINER_EXECUTOR_CLASS, DefaultContainerExecutor.class, ContainerExecutor.class); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java index fae2aa0d20..c0747b0d53 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java @@ -57,6 +57,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptID; import org.apache.hadoop.mapreduce.TaskCompletionEvent; import org.apache.hadoop.mapreduce.TaskID; 
+import org.apache.hadoop.mapreduce.TaskReport; import org.apache.hadoop.mapreduce.TaskType; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter; @@ -105,7 +106,8 @@ public static void setup() throws IOException { if (mrCluster == null) { mrCluster = new MiniMRYarnCluster(TestMRJobs.class.getName()); - mrCluster.init(new Configuration()); + Configuration conf = new Configuration(); + mrCluster.init(conf); mrCluster.start(); } @@ -150,7 +152,7 @@ public void testSleepJob() throws IOException, InterruptedException, Assert.assertTrue(succeeded); Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState()); verifySleepJobCounters(job); - + verifyTaskProgress(job); // TODO later: add explicit "isUber()" checks of some sort (extend // JobStatus?)--compare against MRJobConfig.JOB_UBERTASK_ENABLE value @@ -172,6 +174,18 @@ protected void verifySleepJobCounters(Job job) throws InterruptedException, .assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS) != null && counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue() != 0); } + + protected void verifyTaskProgress(Job job) throws InterruptedException, + IOException { + for (TaskReport taskReport : job.getTaskReports(TaskType.MAP)) { + Assert.assertTrue(0.9999f < taskReport.getProgress() + && 1.0001f > taskReport.getProgress()); + } + for (TaskReport taskReport : job.getTaskReports(TaskType.REDUCE)) { + Assert.assertTrue(0.9999f < taskReport.getProgress() + && 1.0001f > taskReport.getProgress()); + } + } @Test public void testRandomWriter() throws IOException, InterruptedException, @@ -197,6 +211,7 @@ public void testRandomWriter() throws IOException, InterruptedException, boolean succeeded = job.waitForCompletion(true); Assert.assertTrue(succeeded); Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState()); + // Make sure there are three files in the output-dir RemoteIterator iterator = diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java index e2620b3894..d63b8ca924 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java @@ -120,7 +120,8 @@ public class ShuffleHandler extends AbstractService private static final JobTokenSecretManager secretManager = new JobTokenSecretManager(); - public static final String SHUFFLE_PORT = "mapreduce.shuffle.port"; + public static final String SHUFFLE_PORT_CONFIG_KEY = "mapreduce.shuffle.port"; + public static final int DEFAULT_SHUFFLE_PORT = 8080; @Metrics(about="Shuffle output metrics", context="mapred") static class ShuffleMetrics implements ChannelFutureListener { @@ -155,15 +156,59 @@ public ShuffleHandler() { this(DefaultMetricsSystem.instance()); } + /** + * Serialize the shuffle port into a ByteBuffer for use later on. + * @param port the port to be sent to the ApplciationMaster + * @return the serialized form of the port. 
+ */ + static ByteBuffer serializeMetaData(int port) throws IOException { + //TODO these bytes should be versioned + DataOutputBuffer port_dob = new DataOutputBuffer(); + port_dob.writeInt(port); + return ByteBuffer.wrap(port_dob.getData(), 0, port_dob.getLength()); + } + + /** + * A helper function to deserialize the metadata returned by ShuffleHandler. + * @param meta the metadata returned by the ShuffleHandler + * @return the port the Shuffle Handler is listening on to serve shuffle data. + */ + public static int deserializeMetaData(ByteBuffer meta) throws IOException { + //TODO this should be returning a class not just an int + DataInputByteBuffer in = new DataInputByteBuffer(); + in.reset(meta); + int port = in.readInt(); + return port; + } + + /** + * A helper function to serialize the JobTokenIdentifier to be sent to the + * ShuffleHandler as ServiceData. + * @param jobToken the job token to be used for authentication of + * shuffle data requests. + * @return the serialized version of the jobToken. + */ + public static ByteBuffer serializeServiceData(Token jobToken) throws IOException { + //TODO these bytes should be versioned + DataOutputBuffer jobToken_dob = new DataOutputBuffer(); + jobToken.write(jobToken_dob); + return ByteBuffer.wrap(jobToken_dob.getData(), 0, jobToken_dob.getLength()); + } + + static Token deserializeServiceData(ByteBuffer secret) throws IOException { + DataInputByteBuffer in = new DataInputByteBuffer(); + in.reset(secret); + Token jt = new Token(); + jt.readFields(in); + return jt; + } + @Override public void initApp(String user, ApplicationId appId, ByteBuffer secret) { // TODO these bytes should be versioned try { - DataInputByteBuffer in = new DataInputByteBuffer(); - in.reset(secret); - Token jt = new Token(); - jt.readFields(in); - // TODO: Once SHuffle is out of NM, this can use MR APIs + Token jt = deserializeServiceData(secret); + // TODO: Once SHuffle is out of NM, this can use MR APIs JobID jobId = new JobID(Long.toString(appId.getClusterTimestamp()), appId.getId()); userRsrc.put(jobId.toString(), user); LOG.info("Added token for " + jobId.toString()); @@ -193,7 +238,7 @@ public synchronized void start() { Configuration conf = getConfig(); ServerBootstrap bootstrap = new ServerBootstrap(selector); bootstrap.setPipelineFactory(new HttpPipelineFactory(conf)); - port = conf.getInt("mapreduce.shuffle.port", 8080); + port = conf.getInt(SHUFFLE_PORT_CONFIG_KEY, DEFAULT_SHUFFLE_PORT); accepted.add(bootstrap.bind(new InetSocketAddress(port))); LOG.info(getName() + " listening on port " + port); super.start(); @@ -207,6 +252,17 @@ public synchronized void stop() { super.stop(); } + @Override + public synchronized ByteBuffer getMeta() { + try { + return serializeMetaData(port); + } catch (IOException e) { + LOG.error("Error during getMeta", e); + // TODO add API to AuxiliaryServices to report failures + return null; + } + } + Shuffle createShuffle() { return new Shuffle(getConfig()); } @@ -306,7 +362,7 @@ public void messageReceived(ChannelHandlerContext ctx, MessageEvent evt) HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK); try { verifyRequest(jobId, ctx, request, response, - new URL("http", "", 8080, reqUri)); + new URL("http", "", port, reqUri)); } catch (IOException e) { LOG.warn("Shuffle failure ", e); sendError(ctx, e.getMessage(), UNAUTHORIZED); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java index 97f0c9740a..c1526cc572 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java @@ -26,11 +26,21 @@ import org.jboss.netty.channel.ChannelFuture; import org.junit.Test; +import static org.junit.Assert.*; import static org.apache.hadoop.test.MockitoMaker.*; public class TestShuffleHandler { static final long MiB = 1024 * 1024; + @Test public void testSerializeMeta() throws Exception { + assertEquals(1, ShuffleHandler.deserializeMetaData( + ShuffleHandler.serializeMetaData(1))); + assertEquals(-1, ShuffleHandler.deserializeMetaData( + ShuffleHandler.serializeMetaData(-1))); + assertEquals(8080, ShuffleHandler.deserializeMetaData( + ShuffleHandler.serializeMetaData(8080))); + } + @Test public void testShuffleMetrics() throws Exception { MetricsSystem ms = new MetricsSystemImpl(); ShuffleHandler sh = new ShuffleHandler(ms); diff --git a/hadoop-mapreduce-project/hadoop-yarn/bin/yarn b/hadoop-mapreduce-project/hadoop-yarn/bin/yarn index b6edcd93c9..31354dc714 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/bin/yarn +++ b/hadoop-mapreduce-project/hadoop-yarn/bin/yarn @@ -148,132 +148,18 @@ IFS= # add hadoop-common libs to CLASSPATH -if [ -d "$HADOOP_COMMON_HOME/build/classes" ]; then - CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/build/classes -fi -if [ -d "$HADOOP_COMMON_HOME/build/webapps" ]; then - CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/build -fi -if [ -d "$HADOOP_COMMON_HOME/build/test/classes" ]; then - CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/build/test/classes -fi -if [ -d "$HADOOP_COMMON_HOME/build/test/core/classes" ]; then - CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/build/test/core/classes -fi - -for f in $HADOOP_COMMON_HOME/hadoop-*.jar; do - CLASSPATH=${CLASSPATH}:$f; -done - -for f in $HADOOP_COMMON_HOME/lib/*.jar; do - CLASSPATH=${CLASSPATH}:$f; -done - -for f in $HADOOP_COMMON_HOME/share/hadoop/common/*.jar; do - CLASSPATH=${CLASSPATH}:$f; -done - -for f in $HADOOP_COMMON_HOME/share/hadoop/common/lib/*.jar; do - CLASSPATH=${CLASSPATH}:$f; -done - -for f in $HADOOP_COMMON_HOME/share/hadoop/hdfs/*.jar; do - CLASSPATH=${CLASSPATH}:$f; -done - -if [ -d "$HADOOP_COMMON_HOME/build/ivy/lib/Hadoop-Common/common" ]; then -for f in $HADOOP_COMMON_HOME/build/ivy/lib/Hadoop-Common/common/*.jar; do - CLASSPATH=${CLASSPATH}:$f; -done -fi - -if [ -d "$HADOOP_COMMON_HOME/build/ivy/lib/Hadoop-Hdfs/common" ]; then -for f in $HADOOP_COMMON_HOME/build/ivy/lib/Hadoop-Hdfs/common/*.jar; do - CLASSPATH=${CLASSPATH}:$f; -done -fi - -if [ -d "$HADOOP_COMMON_HOME/build/ivy/lib/Hadoop/common" ]; then -for f in $HADOOP_COMMON_HOME/build/ivy/lib/Hadoop/common/*.jar; do - CLASSPATH=${CLASSPATH}:$f; -done -fi +CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/share/hadoop/common'/*' +CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/share/hadoop/common/lib'/*' # add hadoop-hdfs libs to CLASSPATH -for f in $HADOOP_HDFS_HOME/hadoop-*.jar; do - CLASSPATH=${CLASSPATH}:$f; -done - -for f in $HADOOP_HDFS_HOME/lib/*.jar; do - CLASSPATH=${CLASSPATH}:$f; -done - -if [ -d "$HADOOP_HDFS_HOME/build/classes" ]; then - CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build/classes -fi -if [ -d "$HADOOP_HDFS_HOME/build/webapps" ]; then - 
CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build -fi -if [ -d "$HADOOP_HDFS_HOME/build/test/classes" ]; then - CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build/test/classes -fi -if [ -d "$HADOOP_HDFS_HOME/build/tools" ]; then - CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build/tools -fi - -# add hadoop-mapred libs to CLASSPATH - -for f in $HADOOP_HDFS_HOME/hadoop-*.jar; do - CLASSPATH=${CLASSPATH}:$f; -done - -for f in $HADOOP_HDFS_HOME/lib/*.jar; do - CLASSPATH=${CLASSPATH}:$f; -done - -if [ -d "$HADOOP_MAPRED_HOME/build/classes" ]; then - CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build/classes -fi -if [ -d "$HADOOP_MAPRED_HOME/build/webapps" ]; then - CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build -fi -if [ -d "$HADOOP_MAPRED_HOME/build/test/classes" ]; then - CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build/test/classes -fi -if [ -d "$HADOOP_MAPRED_HOME/build/tools" ]; then - CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build/tools -fi - -# for releases, add core mapred jar & webapps to CLASSPATH -if [ -d "$HADOOP_MAPRED_HOME/webapps" ]; then - CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME -fi - -# add libs to CLASSPATH -for f in $HADOOP_MAPRED_HOME/lib/*.jar; do - CLASSPATH=${CLASSPATH}:$f; -done - -# add libs to CLASSPATH -for f in $HADOOP_MAPRED_HOME/*.jar; do - CLASSPATH=${CLASSPATH}:$f; -done - -# add libs to CLASSPATH -for f in $YARN_HOME/lib/*.jar; do - CLASSPATH=${CLASSPATH}:$f; -done +CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/share/hadoop/hdfs'/*' +CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib'/*' # add yarn libs to CLASSPATH -for f in $YARN_HOME/modules/*.jar; do - CLASSPATH=${CLASSPATH}:$f; -done -# add user-specified CLASSPATH last -if [ "$YARN_USER_CLASSPATH_FIRST" = "" ] && [ "$YARN_CLASSPATH" != "" ]; then - CLASSPATH=${CLASSPATH}:${YARN_CLASSPATH} -fi +CLASSPATH=${CLASSPATH}:$YARN_HOME/modules'/*' +CLASSPATH=${CLASSPATH}:$YARN_HOME/lib'/*' # default log directory & file if [ "$YARN_LOG_DIR" = "" ]; then diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/AMRMProtocol.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/AMRMProtocol.java index 58dc9e02e4..d436f8e67f 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/AMRMProtocol.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/AMRMProtocol.java @@ -18,16 +18,94 @@ package org.apache.hadoop.yarn.api; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; +/** + *

The protocol between a live instance of ApplicationMaster + * and the ResourceManager. + * + * This is used by the ApplicationMaster to register/unregister + * and to request and obtain resources in the cluster from the + * ResourceManager. + */ +@Public +@Stable public interface AMRMProtocol { - public RegisterApplicationMasterResponse registerApplicationMaster(RegisterApplicationMasterRequest request) throws YarnRemoteException; - public FinishApplicationMasterResponse finishApplicationMaster(FinishApplicationMasterRequest request) throws YarnRemoteException;; - public AllocateResponse allocate(AllocateRequest request) throws YarnRemoteException;
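The new class-level javadoc above lays out the ApplicationMaster/ResourceManager protocol that the per-method docs below detail. Purely as a hedged sketch (not part of this patch), the expected call sequence over AMRMProtocol is roughly register, a periodic allocate heartbeat, then finish; the record-factory pattern mirrors what this patch uses elsewhere (e.g. in ClientServiceDelegate), and the request fields are left as comments because their setters are not shown here.

    import org.apache.hadoop.yarn.api.AMRMProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
    import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
    import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
    import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
    import org.apache.hadoop.yarn.factories.RecordFactory;
    import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

    public class AppMasterSkeleton {
      private final RecordFactory recordFactory =
          RecordFactoryProvider.getRecordFactory(null);

      void run(AMRMProtocol scheduler) throws Exception {
        // 1. Register: host, RPC port and tracking URL would be filled in here.
        RegisterApplicationMasterRequest register =
            recordFactory.newRecordInstance(RegisterApplicationMasterRequest.class);
        scheduler.registerApplicationMaster(register);

        boolean done = false;
        while (!done) {
          // 2. Allocate doubles as the heartbeat: attach asks/releases, read back
          //    newly allocated containers and completed-container statuses.
          AllocateRequest ask =
              recordFactory.newRecordInstance(AllocateRequest.class);
          AllocateResponse response = scheduler.allocate(ask);
          // ... hand out work on response's containers, eventually set done = true ...
          Thread.sleep(1000);
        }

        // 3. Unregister with the final state and any diagnostics.
        FinishApplicationMasterRequest finish =
            recordFactory.newRecordInstance(FinishApplicationMasterRequest.class);
        scheduler.finishApplicationMaster(finish);
      }
    }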

+ + /** + * The interface used by a new ApplicationMaster to register + * with the ResourceManager. + * + * The ApplicationMaster needs to provide details such + * as RPC Port, HTTP tracking url etc. as specified in + * {@link RegisterApplicationMasterRequest}. + * + * The ResourceManager responds with critical details such + * as minimum and maximum resource capabilities in the cluster as specified in + * {@link RegisterApplicationMasterResponse}. + * + * @param request registration request + * @return registration response + * @throws YarnRemoteException + */ + public RegisterApplicationMasterResponse registerApplicationMaster( + RegisterApplicationMasterRequest request) + throws YarnRemoteException;

+ + /** + * The interface used by an ApplicationMaster to notify the + * ResourceManager about its completion (success or failure). + * + * The ApplicationMaster has to provide details such as + * final state, diagnostics (in case of failures) etc. as specified in + * {@link FinishApplicationMasterRequest}. + * + * The ResourceManager responds with + * {@link FinishApplicationMasterResponse}. + * + * @param request completion request + * @return completion response + * @throws YarnRemoteException + */ + public FinishApplicationMasterResponse finishApplicationMaster( + FinishApplicationMasterRequest request) + throws YarnRemoteException;

    The main interface between an ApplicationMaster + * and the ResourceManager.

    + * + *

    The ApplicationMaster uses this interface to provide a list + * of {@link ResourceRequest} and returns unused {@link Container} allocated + * to it via {@link AllocateRequest}.

    + * + *

    This also doubles up as a heartbeat to let the + * ResourceManager know that the ApplicationMaster + * is alive. Thus, applications should periodically make this call to + * be kept alive.

    + * + *

    The ResourceManager responds with list of allocated + * {@link Container}, status of completed containers and headroom information + * for the application.

    + * + *

    The ApplicationMaster can use the available headroom + * (resources) to decide how to utilize allocated resources and make + * informed decisions about future resource requests.
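For illustration only (not part of this patch): a minimal sketch of the register / heartbeat-allocate / finish cycle this interface describes, written against just the three methods documented here. How the AMRMProtocol proxy and the empty request records are instantiated is outside this diff, so they are passed in pre-built; the host, port, URLs and final-state string are placeholder values, and the generic types elided by the rendering are assumed.

    import java.util.Collections;
    import java.util.List;
    import org.apache.hadoop.yarn.api.AMRMProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.*;
    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.api.records.ResourceRequest;
    import org.apache.hadoop.yarn.exceptions.YarnRemoteException;

    public class AMRMProtocolSketch {
      public static void run(AMRMProtocol scheduler,
                             ApplicationAttemptId attemptId,
                             RegisterApplicationMasterRequest register,  // pre-instantiated records
                             AllocateRequest heartbeat,
                             FinishApplicationMasterRequest finish,
                             List<ResourceRequest> asks) throws YarnRemoteException {
        // 1. Register the new ApplicationMaster with the ResourceManager.
        register.setApplicationAttemptId(attemptId);
        register.setHost("am.example.com");                   // placeholder host
        register.setRpcPort(4242);                            // placeholder port
        register.setTrackingUrl("http://am.example.com:8080");
        RegisterApplicationMasterResponse registration =
            scheduler.registerApplicationMaster(register);

        // 2. Heartbeat/allocate: ask for resources, hand back unused containers,
        //    report progress. A real AM repeats this call periodically.
        heartbeat.setApplicationAttemptId(attemptId);
        heartbeat.setResponseId(0);                           // caller-maintained, detects duplicates
        heartbeat.setProgress(0.0f);
        heartbeat.addAllAsks(asks);
        heartbeat.addAllReleases(Collections.<ContainerId>emptyList());
        AllocateResponse allocation = scheduler.allocate(heartbeat);

        // 3. Tell the ResourceManager the application is done.
        finish.setAppAttemptId(attemptId);
        finish.setFinalState("SUCCEEDED");                    // placeholder final state
        finish.setDiagnostics("");
        finish.setTrackingUrl("http://history.example.com/app");
        scheduler.finishApplicationMaster(finish);
      }
    }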

    + * + * @param request allocation request + * @return allocation response + * @throws YarnRemoteException + */ + public AllocateResponse allocate(AllocateRequest request) + throws YarnRemoteException; } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocol.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocol.java index 494cbfd537..db4c4790cf 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocol.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocol.java @@ -18,6 +18,9 @@ package org.apache.hadoop.yarn.api; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; + import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest; @@ -36,16 +39,190 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.api.records.NodeReport; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; +/** + *

    The protocol between clients and the ResourceManager + * to submit/abort jobs and to get information on applications, cluster metrics, + * nodes, queues and ACLs.

    + */ +@Public +@Stable public interface ClientRMProtocol { - public GetNewApplicationIdResponse getNewApplicationId(GetNewApplicationIdRequest request) throws YarnRemoteException; - public GetApplicationReportResponse getApplicationReport(GetApplicationReportRequest request) throws YarnRemoteException; - public SubmitApplicationResponse submitApplication(SubmitApplicationRequest request) throws YarnRemoteException; - public FinishApplicationResponse finishApplication(FinishApplicationRequest request) throws YarnRemoteException; - public GetClusterMetricsResponse getClusterMetrics(GetClusterMetricsRequest request) throws YarnRemoteException; - public GetAllApplicationsResponse getAllApplications(GetAllApplicationsRequest request) throws YarnRemoteException; - public GetClusterNodesResponse getClusterNodes(GetClusterNodesRequest request) throws YarnRemoteException; - public GetQueueInfoResponse getQueueInfo(GetQueueInfoRequest request) throws YarnRemoteException; - public GetQueueUserAclsInfoResponse getQueueUserAcls(GetQueueUserAclsInfoRequest request) throws YarnRemoteException; + /** + *

    The interface used by clients to obtain a new {@link ApplicationId} for + * submitting new applications.

    + * + *

    The ResourceManager responds with a new, monotonically + * increasing, {@link ApplicationId} which is used by the client to submit + * a new application.

    + * + * @param request request to get a new ApplicationId + * @return new ApplicationId to be used to submit an application + * @throws YarnRemoteException + * @see #submitApplication(SubmitApplicationRequest) + */ + public GetNewApplicationIdResponse getNewApplicationId( + GetNewApplicationIdRequest request) + throws YarnRemoteException; + + /** + *

    The interface used by clients to submit a new application to the + * ResourceManager.

    + * + *

    The client is required to provide details such as queue, + * {@link Resource} required to run the ApplicationMaster, + * the equivalent of {@link ContainerLaunchContext} for launching + * the ApplicationMaster etc. via the + * {@link SubmitApplicationRequest}.

    + * + *

    Currently the ResourceManager sends an immediate (empty) + * {@link SubmitApplicationResponse} on accepting the submission and throws + * an exception if it rejects the submission.

    + * + *

    In secure mode, the ResourceManager verifies access to + * queues etc. before accepting the application submission.
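For illustration only (not part of this patch): a sketch of the two-step submission flow described above. The empty GetNewApplicationIdRequest and a fully assembled SubmitApplicationRequest (queue, Resource, ContainerLaunchContext, etc.) are assumed to be built elsewhere; only the call sequence and the ApplicationId handling documented in this patch are shown.

    import org.apache.hadoop.yarn.api.ClientRMProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdRequest;
    import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdResponse;
    import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.exceptions.YarnRemoteException;

    public class SubmitSketch {
      /** Ask the RM for a new id, then submit an already-assembled request. */
      public static ApplicationId submit(ClientRMProtocol rm,
                                         GetNewApplicationIdRequest idRequest,
                                         SubmitApplicationRequest submission)
          throws YarnRemoteException {
        GetNewApplicationIdResponse idResponse = rm.getNewApplicationId(idRequest);
        ApplicationId appId = idResponse.getApplicationId();
        // The id is assumed to be folded into the submission context by the
        // caller; that plumbing is not part of this diff.
        rm.submitApplication(submission);   // empty response; rejections surface as exceptions
        return appId;
      }
    }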

    + * + * @param request request to submit a new application + * @return (empty) response on accepting the submission + * @throws YarnRemoteException + * @see #getNewApplicationId(GetNewApplicationIdRequest) + */ + public SubmitApplicationResponse submitApplication( + SubmitApplicationRequest request) + throws YarnRemoteException; + + /** + *

    The interface used by clients to request the + * ResourceManager to abort submitted application.

    + * + *

    The client, via {@link FinishApplicationRequest} provides the + * {@link ApplicationId} of the application to be aborted.

    + * + *

    In secure mode, the ResourceManager verifies access to the + * application, queue etc. before terminating the application.

    + * + *

    Currently, the ResourceManager returns an empty response + * on success and throws an exception on rejecting the request.

    + * + * @param request request to abort a submited application + * @return ResourceManager returns an empty response + * on success and throws an exception on rejecting the request + * @throws YarnRemoteException + * @see #getQueueUserAcls(GetQueueUserAclsInfoRequest) + */ + public FinishApplicationResponse finishApplication( + FinishApplicationRequest request) + throws YarnRemoteException; + + /** + *

    The interface used by clients to get a report of an Application from + * the ResourceManager.

    + * + *

    The client, via {@link GetApplicationReportRequest} provides the + * {@link ApplicationId} of the application.

    + * + *

    In secure mode, the ResourceManager verifies access to the + * application, queue etc. before accepting the request.

    + * + *

    The ResourceManager responds with a + * {@link GetApplicationReportResponse} which includes the + * {@link ApplicationReport} for the application.
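For illustration only (not part of this patch): fetching a report for a known application, using the setter on GetApplicationReportRequest and the getter on the response as documented in this patch. The empty request record is assumed to be obtained elsewhere.

    import org.apache.hadoop.yarn.api.ClientRMProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.ApplicationReport;
    import org.apache.hadoop.yarn.exceptions.YarnRemoteException;

    public class ReportSketch {
      public static ApplicationReport fetch(ClientRMProtocol rm,
                                            GetApplicationReportRequest request,
                                            ApplicationId appId) throws YarnRemoteException {
        request.setApplicationId(appId);   // which application to describe
        return rm.getApplicationReport(request).getApplicationReport();
      }
    }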

    + * + * @param request request for an application report + * @return application report + * @throws YarnRemoteException + */ + public GetApplicationReportResponse getApplicationReport( + GetApplicationReportRequest request) + throws YarnRemoteException; + + /** + *

    The interface used by clients to get metrics about the cluster from + * the ResourceManager.

    + * + *

    The ResourceManager responds with a + * {@link GetClusterMetricsResponse} which includes the + * {@link YarnClusterMetrics} with details such as number of current + * nodes in the cluster.

    + * + * @param request request for cluster metrics + * @return cluster metrics + * @throws YarnRemoteException + */ + public GetClusterMetricsResponse getClusterMetrics( + GetClusterMetricsRequest request) + throws YarnRemoteException; + + /** + *

    The interface used by clients to get a report of all Applications + * in the cluster from the ResourceManager.

    + * + *

    The ResourceManager responds with a + * {@link GetAllApplicationsResponse} which includes the + * {@link ApplicationReport} for all the applications.

    + * + * @param request request for report on all running applications + * @return report on all running applications + * @throws YarnRemoteException + */ + public GetAllApplicationsResponse getAllApplications( + GetAllApplicationsRequest request) + throws YarnRemoteException; + + /** + *

    The interface used by clients to get a report of all nodes + * in the cluster from the ResourceManager.

    + * + *

    The ResourceManager responds with a + * {@link GetClusterNodesResponse} which includes the + * {@link NodeReport} for all the nodes in the cluster.
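For illustration only (not part of this patch): a sketch combining the two cluster-introspection calls documented here. Both request records are currently empty; the generic element type of the node-report list, elided by the rendering, is assumed to be NodeReport.

    import java.util.List;
    import org.apache.hadoop.yarn.api.ClientRMProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
    import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
    import org.apache.hadoop.yarn.api.records.NodeReport;
    import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
    import org.apache.hadoop.yarn.exceptions.YarnRemoteException;

    public class ClusterInfoSketch {
      public static void describeCluster(ClientRMProtocol rm,
                                         GetClusterMetricsRequest metricsReq,
                                         GetClusterNodesRequest nodesReq)
          throws YarnRemoteException {
        // The interesting data lives entirely in the responses.
        YarnClusterMetrics metrics = rm.getClusterMetrics(metricsReq).getClusterMetrics();
        List<NodeReport> nodes = rm.getClusterNodes(nodesReq).getNodeReports();
        System.out.println("Cluster metrics: " + metrics + ", nodes reported: " + nodes.size());
      }
    }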

    + * + * @param request request for report on all nodes + * @return report on all nodes + * @throws YarnRemoteException + */ + public GetClusterNodesResponse getClusterNodes( + GetClusterNodesRequest request) + throws YarnRemoteException; + + /** + *

    The interface used by clients to get information about queues + * from the ResourceManager.

    + * + *

    The client, via {@link GetQueueInfoRequest}, can ask for details such + * as used/total resources, child queues, running applications etc.

    + * + *

    In secure mode, the ResourceManager verifies access before + * providing the information.
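For illustration only (not part of this patch): populating a GetQueueInfoRequest with the flags this patch documents and reading back the QueueInfo. The empty request record and the queue name are assumed.

    import org.apache.hadoop.yarn.api.ClientRMProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
    import org.apache.hadoop.yarn.api.records.QueueInfo;
    import org.apache.hadoop.yarn.exceptions.YarnRemoteException;

    public class QueueInfoSketch {
      public static QueueInfo describeQueue(ClientRMProtocol rm,
                                            GetQueueInfoRequest request,
                                            String queueName) throws YarnRemoteException {
        request.setQueueName(queueName);        // e.g. "default" (assumed queue name)
        request.setIncludeApplications(true);   // also list active applications
        request.setIncludeChildQueues(true);    // include direct children
        request.setRecursive(false);            // but not the whole hierarchy
        return rm.getQueueInfo(request).getQueueInfo();
      }
    }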

    + * + * @param request request to get queue information + * @return queue information + * @throws YarnRemoteException + */ + public GetQueueInfoResponse getQueueInfo( + GetQueueInfoRequest request) + throws YarnRemoteException; + + /** + *

    The interface used by clients to get information about queue + * acls for current users from the ResourceManager. + *

    + * + *

    The ResourceManager responds with queue acls for all + * existing queues.

    + * + * @param request request to get queue acls for current user + * @return queue acls for current user + * @throws YarnRemoteException + */ + public GetQueueUserAclsInfoResponse getQueueUserAcls( + GetQueueUserAclsInfoRequest request) + throws YarnRemoteException; } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManager.java index a778793992..1d16cec26c 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManager.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManager.java @@ -18,21 +18,108 @@ package org.apache.hadoop.yarn.api; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse; import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; +/** + *

    The protocol between an ApplicationMaster and a + * NodeManager to start/stop containers and to get status + * of running containers.

    + * + *

    If security is enabled the NodeManager verifies that the + * ApplicationMaster has truly been allocated the container + * by the ResourceManager and also verifies all interactions such + * as stopping the container or obtaining status information for the container. + *

    + */ +@Public +@Stable public interface ContainerManager { + /** + *

    The ApplicationMaster requests a NodeManager + * to start a {@link Container} allocated to it using this interface. + *

    + * + *

    The ApplicationMaster has to provide details such as + * allocated resource capability, security tokens (if enabled), command + * to be executed to start the container, environment for the process, + * necessary binaries/jar/shared-objects etc. via the + * {@link ContainerLaunchContext} in the {@link StartContainerRequest}.

    + * + *

    Currently the NodeManager sends an immediate, empty + * response via {@link StartContainerResponse} to signify acceptance of the + * request and throws an exception in case of errors. The + * ApplicationMaster can use + * {@link #getContainerStatus(GetContainerStatusRequest)} to get updated + * status of the to-be-launched or launched container.
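For illustration only (not part of this patch): wiring a pre-built ContainerLaunchContext into a StartContainerRequest and issuing the call, exactly as the paragraph above describes. Assembling the launch context (commands, environment, local resources, tokens) is not shown in this diff and is assumed to happen elsewhere.

    import org.apache.hadoop.yarn.api.ContainerManager;
    import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
    import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
    import org.apache.hadoop.yarn.exceptions.YarnRemoteException;

    public class StartContainerSketch {
      public static void launch(ContainerManager nodeManager,
                                StartContainerRequest request,
                                ContainerLaunchContext launchContext) throws YarnRemoteException {
        // The launch context is assumed to be fully assembled by the caller.
        request.setContainerLaunchContext(launchContext);
        nodeManager.startContainer(request);   // empty response; failures are exceptions
      }
    }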

    + * + * @param request request to start a container + * @return empty response to indicate acceptance of the request + * or an exception + * @throws YarnRemoteException + */ + @Public + @Stable StartContainerResponse startContainer(StartContainerRequest request) throws YarnRemoteException; + /** + *

    The ApplicationMaster requests a NodeManager + * to stop a {@link Container} allocated to it using this interface. + *

    + * + *

    The ApplicationMaster sends a + * {@link StopContainerRequest} which includes the {@link ContainerId} of the + * container to be stopped.

    + * + *

    Currently the NodeManager sends an immediate, empty + * response via {@link StopContainerResponse} to signify acceptance of the + * request and throws an exception in case of errors. The + * ApplicationMaster can use + * {@link #getContainerStatus(GetContainerStatusRequest)} to get updated + * status of the container.

    + * + * @param request request to stop a container + * @return empty response to indicate acceptance of the request + * or an exception + * @throws YarnRemoteException + */ + @Public + @Stable StopContainerResponse stopContainer(StopContainerRequest request) throws YarnRemoteException; + /** + *

    The API used by the ApplicationMaster to request the + * current status of a Container from the + * NodeManager.

    + * + *

    The ApplicationMaster sends a + * {@link GetContainerStatusRequest} which includes the {@link ContainerId} of + * the container whose status is needed.

    + * + *

    The NodeManager responds with + * {@link GetContainerStatusResponse} which includes the + * {@link ContainerStatus} of the container.
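For illustration only (not part of this patch): polling a container's status using the request setter and response getter documented in this patch; the empty request record is assumed to come from elsewhere.

    import org.apache.hadoop.yarn.api.ContainerManager;
    import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.api.records.ContainerStatus;
    import org.apache.hadoop.yarn.exceptions.YarnRemoteException;

    public class ContainerStatusSketch {
      public static ContainerStatus poll(ContainerManager nodeManager,
                                         GetContainerStatusRequest request,
                                         ContainerId containerId) throws YarnRemoteException {
        request.setContainerId(containerId);   // the container to inspect
        return nodeManager.getContainerStatus(request).getStatus();
      }
    }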

    + * + * @param request request to get ContainerStatus of a container + * with the specified ContainerId + * @return ContainerStatus of the container + * @throws YarnRemoteException + */ + @Public + @Stable GetContainerStatusResponse getContainerStatus( GetContainerStatusRequest request) throws YarnRemoteException; } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java index 2b410be85e..ff03dfea9e 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java @@ -20,36 +20,176 @@ import java.util.List; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.AMRMProtocol; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ResourceRequest; +/** + *

    The core request sent by the ApplicationMaster to the + * ResourceManager to obtain resources in the cluster.

    + * + *

    The request includes:
    • {@link ApplicationAttemptId} being managed by the ApplicationMaster
    • A response id to track duplicate responses.
    • Progress information.
    • A list of {@link ResourceRequest} to inform the ResourceManager about the application's resource requirements.
    • A list of unused {@link Container} which are being returned.
    (A field-by-field usage sketch follows the list.)
    + *
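For illustration only (not part of this patch): refreshing a pre-instantiated AllocateRequest before each heartbeat, touching exactly the fields listed above. The lists and the bookkeeping values are assumed to be maintained by the caller.

    import java.util.List;
    import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.api.records.ResourceRequest;

    public class AllocateRequestSketch {
      /** Refresh a (pre-instantiated) request before each heartbeat. */
      public static void refresh(AllocateRequest request,
                                 ApplicationAttemptId attemptId,
                                 int responseId,
                                 float progress,
                                 List<ResourceRequest> asks,
                                 List<ContainerId> unusedContainers) {
        request.setApplicationAttemptId(attemptId);
        request.setResponseId(responseId);         // lets the RM spot duplicate heartbeats
        request.setProgress(progress);             // e.g. 0.42f
        request.addAllAsks(asks);                  // outstanding resource requirements
        request.addAllReleases(unusedContainers);  // containers being handed back
      }
    }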

    + * + * @see AMRMProtocol#allocate(AllocateRequest) + */ +@Public +@Stable public interface AllocateRequest { + /** + * Get the ApplicationAttemptId being managed by the + * ApplicationMaster. + * @return ApplicationAttemptId being managed by the + * ApplicationMaster + */ + @Public + @Stable ApplicationAttemptId getApplicationAttemptId(); + + /** + * Set the ApplicationAttemptId being managed by the + * ApplicationMaster. + * @param applicationAttemptId ApplicationAttemptId being managed + * by the ApplicationMaster + */ + @Public + @Stable void setApplicationAttemptId(ApplicationAttemptId applicationAttemptId); + /** + * Get the response id. + * @return response id + */ + @Public + @Stable int getResponseId(); + + /** + * Set the response id + * @param id response id + */ + @Public + @Stable void setResponseId(int id); + /** + * Get the current progress of application. + * @return current progress of application + */ + @Public + @Stable float getProgress(); + + /** + * Set the current progress of application + * @param progress current progress of application + */ + @Public + @Stable void setProgress(float progress); + /** + * Get the list of ResourceRequest to upate the + * ResourceManager about the application's resource requirements. + * @return the list of ResourceRequest + */ + @Public + @Stable List getAskList(); + + @Private + @Unstable ResourceRequest getAsk(int index); + + @Private + @Unstable int getAskCount(); + /** + * Add list of ResourceRequest to upate the + * ResourceManager about the application's resource requirements. + * @param resourceRequest list of ResourceRequest to upate the + * ResourceManager about the application's + * resource requirements + */ + @Public + @Stable + void addAllAsks(List resourceRequest); + + @Private + @Unstable + void addAsk(ResourceRequest request); + + @Private + @Unstable + void removeAsk(int index); + + @Private + @Unstable + void clearAsks(); + + /** + * Get the list of ContainerId of unused containers being + * released by the ApplicationMaster. 
+ * @return list of ContainerId of unused containers being + * released by the ApplicationMaster + */ + @Public + @Stable List getReleaseList(); + + @Private + @Unstable ContainerId getRelease(int index); + + @Private + @Unstable int getReleaseCount(); - void addAllAsks(List resourceRequest); - void addAsk(ResourceRequest request); - void removeAsk(int index); - void clearAsks(); - + /** + * Add the list of ContainerId of unused containers being + * released by the ApplicationMaster + * @param releaseContainers list of ContainerId of unused + * containers being released by the < + * code>ApplicationMaster + */ + @Public + @Stable void addAllReleases(List releaseContainers); + + @Private + @Unstable void addRelease(ContainerId container); + + @Private + @Unstable void removeRelease(int index); + + @Private + @Unstable void clearReleases(); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java index 00409c50a5..cdf8382b01 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java @@ -18,10 +18,47 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.AMRMProtocol; import org.apache.hadoop.yarn.api.records.AMResponse; +import org.apache.hadoop.yarn.api.records.Container; +/** + *

    The response sent by the ResourceManager to the + * ApplicationMaster during resource negotiation.

    + * + *

    The response, via {@link AMResponse}, includes:
    • Response ID to track duplicate responses.
    • A reboot flag to let the ApplicationMaster know that it is horribly out of sync and needs to reboot.
    • A list of newly allocated {@link Container}.
    • A list of completed {@link Container}.
    • The available headroom for resources in the cluster for the application.
    + *

    + * + * @see AMRMProtocol#allocate(AllocateRequest) + */ +@Public +@Stable public interface AllocateResponse { + /** + * Get the {@link AMResponse} sent by the ResourceManager. + * @return AMResponse sent by the ResourceManager + */ + @Public + @Stable public abstract AMResponse getAMResponse(); - + + @Private + @Unstable public abstract void setAMResponse(AMResponse amResponse); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationMasterRequest.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationMasterRequest.java index fbb71e753a..36d2af22f9 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationMasterRequest.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationMasterRequest.java @@ -18,21 +18,101 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.AMRMProtocol; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; - +/** + *

    The finalization request sent by the ApplicationMaster to + * inform the ResourceManager about its completion.

    + * + *

    The final request includes details such as:
    • {@link ApplicationAttemptId} being managed by the ApplicationMaster
    • Final state of the ApplicationMaster
    • Diagnostic information in case of failure of the ApplicationMaster
    • Tracking URL
    (A usage sketch follows the list.)
    + *
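For illustration only (not part of this patch): filling in a FinishApplicationMasterRequest for a failed run, using only the setters this patch documents. The final-state string and tracking URL are placeholders; the empty record is assumed to come from elsewhere.

    import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;

    public class FinishRequestSketch {
      /** Report a failed run, including diagnostics for operators. */
      public static void markFailed(FinishApplicationMasterRequest request,
                                    ApplicationAttemptId attemptId,
                                    Throwable cause) {
        request.setAppAttemptId(attemptId);
        request.setFinalState("FAILED");                       // placeholder state string
        request.setDiagnostics("AM failed: " + cause);         // surfaced on failure
        request.setTrackingUrl("http://history.example.com");  // placeholder URL
      }
    }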

    + * + * @see AMRMProtocol#finishApplicationMaster(FinishApplicationMasterRequest) + */ public interface FinishApplicationMasterRequest { + /** + * Get the ApplicationAttemptId being managed by the + * ApplicationMaster. + * @return ApplicationAttemptId being managed by the + * ApplicationMaster + */ + @Public + @Stable ApplicationAttemptId getApplicationAttemptId(); + + /** + * Set the ApplicationAttemptId being managed by the + * ApplicationMaster. + * @param applicationAttemptId ApplicationAttemptId being managed + * by the ApplicationMaster + */ + @Public + @Stable void setAppAttemptId(ApplicationAttemptId applicationAttemptId); + /** + * Get final state of the ApplicationMaster. + * @return final state of the ApplicationMaster + */ + @Public + @Stable String getFinalState(); - void setFinalState(String string); + + /** + * Set final state of the ApplicationMaster + * @param finalState final state of the ApplicationMaster + */ + @Public + @Stable + void setFinalState(String finalState); + /** + * Get diagnostic information on application failure. + * @return diagnostic information on application failure + */ + @Public + @Stable String getDiagnostics(); - void setDiagnostics(String string); + + /** + * Set diagnostic information on application failure. + * @param diagnostics diagnostic information on application failure + */ + @Public + @Stable + void setDiagnostics(String diagnostics); + /** + * Get the tracking URL for the ApplicationMaster. + * @return tracking URLfor the ApplicationMaster + */ + @Public + @Stable String getTrackingUrl(); - void setTrackingUrl(String historyUrl); + + /** + * Set the tracking URLfor the ApplicationMaster + * @param url tracking URLfor the + * ApplicationMaster + */ + @Public + @Stable + void setTrackingUrl(String url); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationMasterResponse.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationMasterResponse.java index 1c30ac4a65..b7710c41ca 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationMasterResponse.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationMasterResponse.java @@ -18,6 +18,20 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.AMRMProtocol; + +/** + *

    The response sent by the ResourceManager to an + * ApplicationMaster on its completion.

    + * + *

    Currently, this is empty.

    + * + * @see AMRMProtocol#finishApplicationMaster(FinishApplicationMasterRequest) + */ +@Public +@Stable public interface FinishApplicationMasterResponse { } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationRequest.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationRequest.java index 0074b6f983..023ee3c4ac 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationRequest.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationRequest.java @@ -18,10 +18,34 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; import org.apache.hadoop.yarn.api.records.ApplicationId; +/** + *

    The request sent by the client to the ResourceManager + * to abort a submitted application.

    + * + *

    The request includes the {@link ApplicationId} of the application to be + * aborted.

    + * + * @see ClientRMProtocol#finishApplication(FinishApplicationRequest) + */ +@Public +@Stable public interface FinishApplicationRequest { + /** + * Get the ApplicationId of the application to be aborted. + * @return ApplicationId of the application to be aborted + */ + @Public + @Stable public abstract ApplicationId getApplicationId(); + @Private + @Unstable public abstract void setApplicationId(ApplicationId applicationId); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationResponse.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationResponse.java index 18e76a4522..cd0c728e53 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationResponse.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationResponse.java @@ -18,6 +18,20 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; + +/** + *

    The response sent by the ResourceManager to the client + * aborting a submitted application.

    + * + *

    Currently it's empty.

    + * + * @see ClientRMProtocol#finishApplication(FinishApplicationRequest) + */ +@Public +@Stable public interface FinishApplicationResponse { } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllApplicationsRequest.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllApplicationsRequest.java index f3b5bf7cb9..a958331244 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllApplicationsRequest.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllApplicationsRequest.java @@ -18,5 +18,17 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; + +/** + *

    The request from clients to get a report of all Applications + * in the cluster from the ResourceManager.

    + * + * @see ClientRMProtocol#getAllApplications(GetAllApplicationsRequest) + */ +@Public +@Stable public interface GetAllApplicationsRequest { } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllApplicationsResponse.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllApplicationsResponse.java index 641d8a3493..3e2e401bb9 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllApplicationsResponse.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllApplicationsResponse.java @@ -20,9 +20,36 @@ import java.util.List; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; import org.apache.hadoop.yarn.api.records.ApplicationReport; +/** + *

    The response sent by the ResourceManager to a client + * requesting an {@link ApplicationReport} for all applications.

    + * + *

    The ApplicationReport for each application includes details + * such as user, queue, name, host on which the ApplicationMaster + * is running, RPC port, tracking URL, diagnostics, start time etc.

    + * + * @see ApplicationReport + * @see ClientRMProtocol#getAllApplications(GetAllApplicationsRequest) + */ +@Public +@Stable public interface GetAllApplicationsResponse { + /** + * Get ApplicationReport for all applications. + * @return ApplicationReport for all applications + */ + @Public + @Stable List getApplicationList(); + + @Private + @Unstable void setApplicationList(List applications); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationReportRequest.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationReportRequest.java index 51f366cf9f..d6f9ee48f0 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationReportRequest.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationReportRequest.java @@ -18,9 +18,34 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +/** + *

    The request sent by a client to the ResourceManager to + * get an {@link ApplicationReport} for an application.

    + * + *

    The request should include the {@link ApplicationId} of the + * application.

    + * + * @see ClientRMProtocol#getApplicationReport(GetApplicationReportRequest) + * @see ApplicationReport + */ +@Public +@Stable public interface GetApplicationReportRequest { - public abstract ApplicationId getApplicationId(); - public abstract void setApplicationId(ApplicationId applicationId); + /** + * Get the ApplicationId of the application. + * @return ApplicationId of the application + */ + public ApplicationId getApplicationId(); + + /** + * Set the ApplicationId of the application + * @param applicationId ApplicationId of the application + */ + public void setApplicationId(ApplicationId applicationId); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationReportResponse.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationReportResponse.java index ddc18d811a..cc93f20c27 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationReportResponse.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationReportResponse.java @@ -18,9 +18,35 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; import org.apache.hadoop.yarn.api.records.ApplicationReport; +/** + *

    The response sent by the ResourceManager to a client + * requesting an application report.

    + * + *

    The response includes an {@link ApplicationReport} which has details such + * as user, queue, name, host on which the ApplicationMaster is + * running, RPC port, tracking URL, diagnostics, start time etc.

    + * + * @see ClientRMProtocol#getApplicationReport(GetApplicationReportRequest) + */ +@Public +@Stable public interface GetApplicationReportResponse { - public abstract ApplicationReport getApplicationReport(); - public abstract void setApplicationReport(ApplicationReport ApplicationReport); + /** + * Get the ApplicationReport for the application. + * @return ApplicationReport for the application + */ + @Public + @Stable + public ApplicationReport getApplicationReport(); + + @Private + @Unstable + public void setApplicationReport(ApplicationReport ApplicationReport); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterMetricsRequest.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterMetricsRequest.java index 000653948d..81b9342803 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterMetricsRequest.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterMetricsRequest.java @@ -18,6 +18,18 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; + +/** + *

    The request sent by clients to get cluster metrics from the + * ResourceManager.

    + * + * @see ClientRMProtocol#getClusterMetrics(GetClusterMetricsRequest) + */ +@Public +@Stable public interface GetClusterMetricsRequest { } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterMetricsResponse.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterMetricsResponse.java index c802d3eee9..691bcc0b38 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterMetricsResponse.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterMetricsResponse.java @@ -18,9 +18,32 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; +/** + *

    The response sent by the ResourceManager to a client + * requesting cluster metrics.

    + * + * @see YarnClusterMetrics + * @see ClientRMProtocol#getClusterMetrics(GetClusterMetricsRequest) + */ +@Public +@Stable public interface GetClusterMetricsResponse { - public abstract YarnClusterMetrics getClusterMetrics(); - public abstract void setClusterMetrics(YarnClusterMetrics metrics); + /** + * Get the YarnClusterMetrics for the cluster. + * @return YarnClusterMetrics for the cluster + */ + @Public + @Stable + public YarnClusterMetrics getClusterMetrics(); + + @Private + @Unstable + public void setClusterMetrics(YarnClusterMetrics metrics); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodesRequest.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodesRequest.java index c6326ed263..35ab169371 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodesRequest.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodesRequest.java @@ -18,6 +18,18 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; + +/** + *

    The request from clients to get a report of all nodes + * in the cluster from the ResourceManager.

    + * + * @see ClientRMProtocol#getClusterNodes(GetClusterNodesRequest) + */ +@Public +@Stable public interface GetClusterNodesRequest { } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodesResponse.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodesResponse.java index 991f9d2b47..e9475fd8d2 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodesResponse.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodesResponse.java @@ -20,9 +20,36 @@ import java.util.List; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; import org.apache.hadoop.yarn.api.records.NodeReport; +/** + *

    The response sent by the ResourceManager to a client + * requesting a {@link NodeReport} for all nodes.

    + * + *

    The NodeReport contains per-node information such as + * available resources, number of containers, tracking url, rack name, health + * status etc. + * + * @see NodeReport + * @see ClientRMProtocol#getClusterNodes(GetClusterNodesRequest) + */ +@Public +@Stable public interface GetClusterNodesResponse { + /** + * Get NodeReport for all nodes in the cluster. + * @return NodeReport for all nodes in the cluster + */ + @Public + @Stable List getNodeReports(); + + @Private + @Unstable void setNodeReports(List nodeReports); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerStatusRequest.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerStatusRequest.java index c9498b0d67..6cd91f2dd9 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerStatusRequest.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerStatusRequest.java @@ -18,9 +18,38 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ContainerManager; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerStatus; +/** + *

    The request sent by the ApplicationMaster to the + * NodeManager to get {@link ContainerStatus} of a container.

    + * + * @see ContainerManager#getContainerStatus(GetContainerStatusRequest) + */ +@Public +@Stable public interface GetContainerStatusRequest { + /** + * Get the ContainerId of container for which to obtain the + * ContainerStatus. + * @return ContainerId of container for which to obtain the + * ContainerStatus + */ + @Public + @Stable public abstract ContainerId getContainerId(); + + /** + * Set the ContainerId of container for which to obtain the + * ContainerStatus + * @param containerId ContainerId of container for which to + * obtain the ContainerStatus + */ + @Public + @Stable public abstract void setContainerId(ContainerId containerId); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerStatusResponse.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerStatusResponse.java index bcf0ffd99a..03b361d837 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerStatusResponse.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerStatusResponse.java @@ -18,9 +18,32 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ContainerManager; import org.apache.hadoop.yarn.api.records.ContainerStatus; +/** + *

    The response sent by the NodeManager to the + * ApplicationMaster when asked to obtain the status + * of a container.

    + * + * @see ContainerManager#getContainerStatus(GetContainerStatusRequest) + */ +@Public +@Stable public interface GetContainerStatusResponse { + /** + * Get the ContainerStatus of the container. + * @return ContainerStatus of the container + */ + @Public + @Stable public abstract ContainerStatus getStatus(); + + @Private + @Unstable public abstract void setStatus(ContainerStatus containerStatus); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNewApplicationIdRequest.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNewApplicationIdRequest.java index a914092f07..c841070080 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNewApplicationIdRequest.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNewApplicationIdRequest.java @@ -18,6 +18,19 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; +import org.apache.hadoop.yarn.api.records.ApplicationId; + +/** + *

    The request sent by clients to get a new {@link ApplicationId} for + * submitting an application.

    + * + * @see ClientRMProtocol#getNewApplicationId(GetNewApplicationIdRequest) + */ +@Public +@Stable public interface GetNewApplicationIdRequest { } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNewApplicationIdResponse.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNewApplicationIdResponse.java index 62a48a9b5f..93a1ab680b 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNewApplicationIdResponse.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNewApplicationIdResponse.java @@ -18,9 +18,33 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; import org.apache.hadoop.yarn.api.records.ApplicationId; +/** + *

    The response sent by the ResourceManager to the client for + * a request to get a new {@link ApplicationId} for submitting applications.

    + * + * @see ClientRMProtocol#getNewApplicationId(GetNewApplicationIdRequest) + */ +@Public +@Stable public interface GetNewApplicationIdResponse { + /** + * Get the new ApplicationId allocated by the + * ResourceManager. + * @return new ApplicationId allocated by the + * ResourceManager + */ + @Public + @Stable public abstract ApplicationId getApplicationId(); + + @Private + @Unstable public abstract void setApplicationId(ApplicationId applicationId); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueInfoRequest.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueInfoRequest.java index 872f8aba5f..435b03cf17 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueInfoRequest.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueInfoRequest.java @@ -18,17 +18,70 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; + +/** + *

    The request sent by clients to get queue information + * from the ResourceManager.

    + * + * @see ClientRMProtocol#getQueueInfo(GetQueueInfoRequest) + */ +@Public +@Stable public interface GetQueueInfoRequest { + /** + * Get the queue name for which to get queue information. + * @return queue name for which to get queue information + */ String getQueueName(); + + /** + * Set the queue name for which to get queue information + * @param queueName queue name for which to get queue information + */ void setQueueName(String queueName); + /** + * Is information about active applications required? + * @return true if applications' information is to be included, + * else false + */ boolean getIncludeApplications(); + + /** + * Should we get fetch information about active applications? + * @param includeApplications fetch information about active + * applications? + */ void setIncludeApplications(boolean includeApplications); + /** + * Is information about child queues required? + * @return true if information about child queues is required, + * else false + */ boolean getIncludeChildQueues(); + + /** + * Should we fetch information about child queues? + * @param includeChildQueues fetch information about child queues? + */ void setIncludeChildQueues(boolean includeChildQueues); + /** + * Is information on the entire child queue hierarchy required? + * @return true if information about entire hierarchy is + * required, false otherwise + */ boolean getRecursive(); + + /** + * Should we fetch information on the entire child queue hierarchy? + * @param recursive fetch information on the entire child queue + * hierarchy? + */ void setRecursive(boolean recursive); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueInfoResponse.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueInfoResponse.java index 8cef4e93c6..830945744f 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueInfoResponse.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueInfoResponse.java @@ -18,9 +18,34 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; import org.apache.hadoop.yarn.api.records.QueueInfo; +/** + *

    The response sent by the ResourceManager to a client + * requesting information about queues in the system.

    + * + *

    The response includes a {@link QueueInfo} which has details such as + * queue name, used/total capacities, running applications, child queues etc.

    + * + * @see QueueInfo + * @see ClientRMProtocol#getQueueInfo(GetQueueInfoRequest) + */ +@Public +@Stable public interface GetQueueInfoResponse { + /** + * Get the QueueInfo for the specified queue. + * @return QueueInfo for the specified queue + */ QueueInfo getQueueInfo(); + + @Private + @Unstable void setQueueInfo(QueueInfo queueInfo); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueUserAclsInfoRequest.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueUserAclsInfoRequest.java index 44fd32e8bb..ec16c1cb11 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueUserAclsInfoRequest.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueUserAclsInfoRequest.java @@ -18,6 +18,18 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; + +/** + *

    The request sent by clients to the ResourceManager to + * get queue acls for the current user.

    + * + * @see ClientRMProtocol#getQueueUserAcls(GetQueueUserAclsInfoRequest) + */ +@Public +@Stable public interface GetQueueUserAclsInfoRequest { } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueUserAclsInfoResponse.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueUserAclsInfoResponse.java index 3aa09f064b..58f640593a 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueUserAclsInfoResponse.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueUserAclsInfoResponse.java @@ -20,12 +20,39 @@ import java.util.List; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; +import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; +/** + *

    The response sent by the ResourceManager to clients + * seeking queue acls for the user.

    + * + *

    The response contains a list of {@link QueueUserACLInfo} which + * provides information about {@link QueueACL} per queue.

    + * + * @see QueueACL + * @see QueueUserACLInfo + * @see ClientRMProtocol#getQueueUserAcls(GetQueueUserAclsInfoRequest) + */ +@Public +@Stable public interface GetQueueUserAclsInfoResponse { + /** + * Get the QueueUserACLInfo per queue for the user. + * @return QueueUserACLInfo per queue for the user + */ + @Public + @Stable public List getUserAclsInfoList(); + @Private + @Unstable public void setUserAclsInfoList(List queueUserAclsList); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java index 202dca7af9..1accbd22d6 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java @@ -18,19 +18,108 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.AMRMProtocol; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +/** + *

    The request sent by the ApplicationMaster to the + * ResourceManager on registration.

    + * + *

    The registration includes details such as:
    • {@link ApplicationAttemptId} being managed by the ApplicationMaster
    • Hostname on which the AM is running.
    • RPC Port
    • Tracking URL
    + *

    + * + * @see AMRMProtocol#registerApplicationMaster(RegisterApplicationMasterRequest) + */ +@Public +@Stable public interface RegisterApplicationMasterRequest { + /** + * Get the ApplicationAttemptId being managed by the + * ApplicationMaster. + * @return ApplicationAttemptId being managed by the + * ApplicationMaster + */ + @Public + @Stable ApplicationAttemptId getApplicationAttemptId(); + + /** + * Set the ApplicationAttemptId being managed by the + * ApplicationMaster. + * @param applicationAttemptId ApplicationAttemptId being managed + * by the ApplicationMaster + */ + @Public + @Stable void setApplicationAttemptId(ApplicationAttemptId applicationAttemptId); + /** + * Get the host on which the ApplicationMaster is + * running. + * @return host on which the ApplicationMaster is running + */ + @Public + @Stable String getHost(); + + /** + * Set the host on which the ApplicationMaster is + * running. + * @param host host on which the ApplicationMaster + * is running + */ + @Private + @Unstable void setHost(String host); + /** + * Get the RPC port on which the ApplicationMaster + * is responding. + * @return the RPC port on which the ApplicationMaster is + * responding + */ + @Public + @Stable int getRpcPort(); + + /** + * Set the RPC port on which the ApplicationMaster is + * responding. + * @param port RPC port on which the ApplicationMaster is + * responding + */ + @Public + @Stable void setRpcPort(int port); + /** + * Get the tracking URL for the ApplicationMaster. + * @return tracking URL for the ApplicationMaster + */ + @Public + @Stable String getTrackingUrl(); - void setTrackingUrl(String string); + + /** + * Set the tracking URL for the ApplicationMaster. + * @param trackingUrl tracking URL for the + * ApplicationMaster + */ + @Public + @Stable + void setTrackingUrl(String trackingUrl); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java index 9d595cf382..9a1895fb14 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java @@ -18,11 +18,53 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.AMRMProtocol; import org.apache.hadoop.yarn.api.records.Resource; +/** + *

+ * <p>The response sent by the <code>ResourceManager</code> to a new
+ * <code>ApplicationMaster</code> on registration.</p>
+ *
+ * <p>The response contains critical details such as:
+ *   <ul>
+ *     <li>Minimum capability for allocated resources in the cluster.</li>
+ *     <li>Maximum capability for allocated resources in the cluster.</li>
+ *   </ul>
+ * </p>

    + * + * @see AMRMProtocol#registerApplicationMaster(RegisterApplicationMasterRequest) + */ +@Public +@Stable public interface RegisterApplicationMasterResponse { + + /** + * Get the minimum capability for any {@link Resource} allocated by the + * ResourceManager in the cluster. + * @return minimum capability of allocated resources in the cluster + */ + @Public + @Stable public Resource getMinimumResourceCapability(); + + @Private + @Unstable public void setMinimumResourceCapability(Resource capability); + + /** + * Get the maximum capability for any {@link Resource} allocated by the + * ResourceManager in the cluster. + * @return maximum capability of allocated resources in the cluster + */ + @Public + @Stable public Resource getMaximumResourceCapability(); + + @Private + @Unstable public void setMaximumResourceCapability(Resource capability); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StartContainerRequest.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StartContainerRequest.java index 1226c91a2a..a3f7e4b70d 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StartContainerRequest.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StartContainerRequest.java @@ -18,10 +18,44 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ContainerManager; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +/** + *

+ * <p>The request sent by the <code>ApplicationMaster</code> to the
+ * <code>NodeManager</code> to start a container.</p>
+ *
+ * <p>The <code>ApplicationMaster</code> has to provide details such as
+ * allocated resource capability, security tokens (if enabled), command
+ * to be executed to start the container, environment for the process,
+ * necessary binaries/jar/shared-objects etc. via the
+ * {@link ContainerLaunchContext}.</p>
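For illustration only: a rough sketch of building a ContainerLaunchContext (documented later in this patch) and a StartContainerRequest for a Container granted by the ResourceManager, assuming the same record-factory pattern; the helper class, the command string, and the ContainerManager proxy are hypothetical.

import java.util.Collections;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.ContainerManager;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

public class StartContainerSketch {
  // Hypothetical helper: launches a shell command in an allocated container.
  public static void launch(ContainerManager containerManager,
      Container allocated, String user) throws Exception {
    RecordFactory recordFactory =
        RecordFactoryProvider.getRecordFactory(new Configuration());

    ContainerLaunchContext ctx =
        recordFactory.newRecordInstance(ContainerLaunchContext.class);
    ctx.setContainerId(allocated.getId());        // container granted by the RM
    ctx.setResource(allocated.getResource());     // capability granted by the RM
    ctx.setUser(user);                            // user the container runs as
    ctx.addAllCommands(Collections.singletonList( // command executed by the NM
        "echo hello > /tmp/out 2> /tmp/err"));

    StartContainerRequest request =
        recordFactory.newRecordInstance(StartContainerRequest.class);
    request.setContainerLaunchContext(ctx);
    containerManager.startContainer(request);
  }
}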

    + * + * @see ContainerManager#startContainer(StartContainerRequest) + */ +@Public +@Stable public interface StartContainerRequest { + /** + * Get the ContainerLaunchContext for the container to be started + * by the NodeManager. + * + * @return ContainerLaunchContext for the container to be started + * by the NodeManager + */ + @Public + @Stable public abstract ContainerLaunchContext getContainerLaunchContext(); + /** + * Set the ContainerLaunchContext for the container to be started + * by the NodeManager + * @param context ContainerLaunchContext for the container to be + * started by the NodeManager + */ + @Public + @Stable public abstract void setContainerLaunchContext(ContainerLaunchContext context); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StartContainerResponse.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StartContainerResponse.java index 7153d19985..a83c520a7c 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StartContainerResponse.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StartContainerResponse.java @@ -18,6 +18,28 @@ package org.apache.hadoop.yarn.api.protocolrecords; -public interface StartContainerResponse { +import java.nio.ByteBuffer; +import java.util.Map; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ContainerManager; + +/** + *

+ * <p>The response sent by the <code>NodeManager</code> to the
+ * <code>ApplicationMaster</code> when asked to start an
+ * allocated container.</p>
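For illustration only: a tiny sketch that reads the auxiliary-service payloads off this response via the accessors declared below; the helper class is hypothetical.

import java.nio.ByteBuffer;
import java.util.Map;

import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;

public class ServiceResponseSketch {
  // Hypothetical helper: dumps the per-service payloads returned when a
  // container is started.
  public static void dump(StartContainerResponse response) {
    Map<String, ByteBuffer> payloads = response.getAllServiceResponse();
    for (Map.Entry<String, ByteBuffer> entry : payloads.entrySet()) {
      System.out.println("service " + entry.getKey() + " returned "
          + entry.getValue().remaining() + " bytes");
    }
  }
}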

    + * + * @see ContainerManager#startContainer(StartContainerRequest) + */ +@Public +@Stable +public interface StartContainerResponse { + Map getAllServiceResponse(); + ByteBuffer getServiceResponse(String key); + + void addAllServiceResponse(Map serviceResponse); + void setServiceResponse(String key, ByteBuffer value); + void removeServiceResponse(String key); + void clearServiceResponse(); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StopContainerRequest.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StopContainerRequest.java index 0debb28b67..2188939705 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StopContainerRequest.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StopContainerRequest.java @@ -18,9 +18,33 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ContainerManager; import org.apache.hadoop.yarn.api.records.ContainerId; +/** + *

+ * <p>The request sent by the <code>ApplicationMaster</code> to the
+ * <code>NodeManager</code> to stop a container.</p>

    + * + * @see ContainerManager#stopContainer(StopContainerRequest) + */ +@Public +@Stable public interface StopContainerRequest { + /** + * Get the ContainerId of the container to be stopped. + * @return ContainerId of container to be stopped + */ + @Public + @Stable ContainerId getContainerId(); + + /** + * Set the ContainerId of the container to be stopped. + * @param containerId ContainerId of the container to be stopped + */ + @Public + @Stable void setContainerId(ContainerId containerId); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StopContainerResponse.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StopContainerResponse.java index ffbdd8a246..6bfd7cffe8 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StopContainerResponse.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StopContainerResponse.java @@ -18,6 +18,19 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ContainerManager; + +/** + *

+ * <p>The response sent by the <code>NodeManager</code> to the
+ * <code>ApplicationMaster</code> when asked to stop an
+ * allocated container.</p>

    + * + * @see ContainerManager#stopContainer(StopContainerRequest) + */ +@Public +@Stable public interface StopContainerResponse { } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/SubmitApplicationRequest.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/SubmitApplicationRequest.java index ac3e72eea4..0d54842896 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/SubmitApplicationRequest.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/SubmitApplicationRequest.java @@ -18,9 +18,43 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.api.records.Resource; +/** + *

+ * <p>The request sent by a client to submit an application to the
+ * <code>ResourceManager</code>.</p>
+ *

    The request, via {@link ApplicationSubmissionContext}, contains + * details such as queue, {@link Resource} required to run the + * ApplicationMaster, the equivalent of + * {@link ContainerLaunchContext} for launching the + * ApplicationMaster etc. + * + * @see ClientRMProtocol#submitApplication(SubmitApplicationRequest) + */ +@Public +@Stable public interface SubmitApplicationRequest { + /** + * Get the ApplicationSubmissionContext for the application. + * @return ApplicationSubmissionContext for the application + */ + @Public + @Stable public abstract ApplicationSubmissionContext getApplicationSubmissionContext(); - public abstract void setApplicationSubmissionContext(ApplicationSubmissionContext context); + + /** + * Set the ApplicationSubmissionContext for the application. + * @param context ApplicationSubmissionContext for the + * application + */ + @Public + @Stable + public abstract void setApplicationSubmissionContext( + ApplicationSubmissionContext context); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/SubmitApplicationResponse.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/SubmitApplicationResponse.java index 9b1df9f1f8..618641be6a 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/SubmitApplicationResponse.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/SubmitApplicationResponse.java @@ -18,6 +18,18 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; + +/** + *

+ * <p>The response sent by the <code>ResourceManager</code> to a client on
+ * application submission.</p>

    + * + * @see ClientRMProtocol#submitApplication(SubmitApplicationRequest) + */ +@Public +@Stable public interface SubmitApplicationResponse { } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerResponsePBImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerResponsePBImpl.java index ed415c8e4f..4fbdf97c7c 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerResponsePBImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerResponsePBImpl.java @@ -19,17 +19,26 @@ package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.List; + + import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse; import org.apache.hadoop.yarn.api.records.ProtoBase; import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerResponseProto; - - +import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerResponseProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnProtos.StringBytesMapProto; public class StartContainerResponsePBImpl extends ProtoBase implements StartContainerResponse { StartContainerResponseProto proto = StartContainerResponseProto.getDefaultInstance(); StartContainerResponseProto.Builder builder = null; boolean viaProto = false; - + + private Map serviceResponse = null; + public StartContainerResponsePBImpl() { builder = StartContainerResponseProto.newBuilder(); } @@ -40,20 +49,113 @@ public StartContainerResponsePBImpl(StartContainerResponseProto proto) { } public StartContainerResponseProto getProto() { + mergeLocalToProto(); proto = viaProto ? proto : builder.build(); viaProto = true; return proto; } + private void mergeLocalToBuilder() { + if (this.serviceResponse != null) { + addServiceResponseToProto(); + } + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + private void maybeInitBuilder() { if (viaProto || builder == null) { builder = StartContainerResponseProto.newBuilder(proto); } viaProto = false; } - + + + @Override + public Map getAllServiceResponse() { + initServiceResponse(); + return this.serviceResponse; + } + @Override + public ByteBuffer getServiceResponse(String key) { + initServiceResponse(); + return this.serviceResponse.get(key); + } + private void initServiceResponse() { + if (this.serviceResponse != null) { + return; + } + StartContainerResponseProtoOrBuilder p = viaProto ? 
proto : builder; + List list = p.getServiceResponseList(); + this.serviceResponse = new HashMap(); - - + for (StringBytesMapProto c : list) { + this.serviceResponse.put(c.getKey(), convertFromProtoFormat(c.getValue())); + } + } + + @Override + public void addAllServiceResponse(final Map serviceResponse) { + if (serviceResponse == null) + return; + initServiceResponse(); + this.serviceResponse.putAll(serviceResponse); + } + + private void addServiceResponseToProto() { + maybeInitBuilder(); + builder.clearServiceResponse(); + if (serviceResponse == null) + return; + Iterable iterable = new Iterable() { + + @Override + public Iterator iterator() { + return new Iterator() { + + Iterator keyIter = serviceResponse.keySet().iterator(); + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + @Override + public StringBytesMapProto next() { + String key = keyIter.next(); + return StringBytesMapProto.newBuilder().setKey(key).setValue(convertToProtoFormat(serviceResponse.get(key))).build(); + } + + @Override + public boolean hasNext() { + return keyIter.hasNext(); + } + }; + } + }; + builder.addAllServiceResponse(iterable); + } + @Override + public void setServiceResponse(String key, ByteBuffer val) { + initServiceResponse(); + this.serviceResponse.put(key, val); + } + @Override + public void removeServiceResponse(String key) { + initServiceResponse(); + this.serviceResponse.remove(key); + } + @Override + public void clearServiceResponse() { + initServiceResponse(); + this.serviceResponse.clear(); + } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMResponse.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMResponse.java index a14b641f68..54b0055133 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMResponse.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMResponse.java @@ -20,31 +20,142 @@ import java.util.List; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.AMRMProtocol; +import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; + +/** + *

+ * <p>The response sent by the <code>ResourceManager</code> to the
+ * <code>ApplicationMaster</code> during resource negotiation.</p>
+ *
+ * <p>The response includes:
+ *   <ul>
+ *     <li>Response ID to track duplicate responses.</li>
+ *     <li>
+ *       A reboot flag to let the <code>ApplicationMaster</code> know that it
+ *       is horribly out of sync and needs to reboot.
+ *     </li>
+ *     <li>A list of newly allocated {@link Container}.</li>
+ *     <li>A list of completed {@link Container}.</li>
+ *     <li>
+ *       The available headroom for resources in the cluster for the
+ *       application.
+ *     </li>
+ *   </ul>
+ * </p>

    + * + * @see AMRMProtocol#allocate(AllocateRequest) + */ +@Public +@Unstable public interface AMResponse { + /** + * Should the ApplicationMaster reboot for being horribly + * out-of-sync with the ResourceManager as deigned by + * {@link #getResponseId()}? + * + * @return true if the ApplicationMaster should + * reboot, false otherwise + */ + @Public + @Stable public boolean getReboot(); + + @Private + @Unstable + public void setReboot(boolean reboot); + + /** + * Get the last response id. + * @return last response id + */ + @Public + @Stable public int getResponseId(); + @Private + @Unstable + public void setResponseId(int responseId); + + /** + * Get the list of newly allocated Container by the + * ResourceManager. + * @return list of newly allocated Container + */ + @Public + @Stable public List getNewContainerList(); + + @Private + @Unstable public Container getNewContainer(int index); + + @Private + @Unstable public int getNewContainerCount(); - public void setReboot(boolean reboot); - public void setResponseId(int responseId); - + @Private + @Unstable public void addAllNewContainers(List containers); + + @Private + @Unstable public void addNewContainer(Container container); + + @Private + @Unstable public void removeNewContainer(int index); + + @Private + @Unstable public void clearNewContainers(); - public void setAvailableResources(Resource limit); + /** + * Get the available headroom for resources in the cluster for the + * application. + * @return limit available headroom for resources in the cluster for the + * application + */ + @Public + @Stable public Resource getAvailableResources(); + @Private + @Unstable + public void setAvailableResources(Resource limit); + + /** + * Get the list of completed containers. + * @return the list of completed containers + */ + @Public + @Stable public List getFinishedContainerList(); + + @Private + @Unstable public Container getFinishedContainer(int index); + + @Private + @Unstable public int getFinishedContainerCount(); + + @Private + @Unstable public void addAllFinishedContainers(List containers); + + @Private + @Unstable public void addFinishedContainer(Container container); + + @Private + @Unstable public void removeFinishedContainer(int index); + + @Private + @Unstable public void clearFinishedContainers(); } \ No newline at end of file diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java index 5512db4beb..ca7a6f415a 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java @@ -18,38 +18,172 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; + +/** + *

+ * <p><code>ApplicationReport</code> is a report of an application.</p>
+ *
+ * <p>It includes details such as:
+ *   <ul>
+ *     <li>{@link ApplicationId} of the application.</li>
+ *     <li>Application user.</li>
+ *     <li>Application queue.</li>
+ *     <li>Application name.</li>
+ *     <li>Host on which the <code>ApplicationMaster</code> is running.</li>
+ *     <li>RPC port of the <code>ApplicationMaster</code>.</li>
+ *     <li>Tracking URL.</li>
+ *     <li>{@link ApplicationState} of the application.</li>
+ *     <li>Diagnostic information in case of errors.</li>
+ *     <li>Start time of the application.</li>
+ *     <li>Client token of the application (if security is enabled).</li>
+ *   </ul>
+ * </p>

    + * + * @see ClientRMProtocol#getApplicationReport(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest) + */ +@Public +@Stable public interface ApplicationReport { + /** + * Get the ApplicationId of the application. + * @return ApplicationId of the application + */ + @Public + @Stable ApplicationId getApplicationId(); + + @Private + @Unstable void setApplicationId(ApplicationId applicationId); + /** + * Get the user who submitted the application. + * @return user who submitted the application + */ + @Public + @Stable String getUser(); + + @Private + @Unstable void setUser(String user); + /** + * Get the queue to which the application was submitted. + * @return queue to which the application was submitted + */ + @Public + @Stable String getQueue(); + + @Private + @Unstable void setQueue(String queue); + /** + * Get the user-defined name of the application. + * @return name of the application + */ + @Public + @Stable String getName(); + + @Private + @Unstable void setName(String name); + /** + * Get the host on which the ApplicationMaster + * is running. + * @return host on which the ApplicationMaster + * is running + */ + @Public + @Stable String getHost(); + + @Private + @Unstable void setHost(String host); + /** + * Get the RPC port of the ApplicationMaster. + * @return RPC port of the ApplicationMaster + */ + @Public + @Stable int getRpcPort(); + + @Private + @Unstable void setRpcPort(int rpcPort); + /** + * Get the client token for communicating with the + * ApplicationMaster. + * @return client token for communicating with the + * ApplicationMaster + */ + @Public + @Stable String getClientToken(); + + @Private + @Unstable void setClientToken(String clientToken); + /** + * Get the ApplicationState of the application. + * @return ApplicationState of the application + */ + @Public + @Stable ApplicationState getState(); + + @Private + @Unstable void setState(ApplicationState state); + /** + * Get the diagnositic information of the application in case of + * errors. + * @return diagnositic information of the application in case + * of errors + */ + @Public + @Stable String getDiagnostics(); + + @Private + @Unstable void setDiagnostics(String diagnostics); + /** + * Get the tracking url for the application. + * @return tracking url for the application + */ + @Public + @Stable String getTrackingUrl(); + + @Private + @Unstable void setTrackingUrl(String url); + /** + * Get the start time of the application. 
+ * @return start time of the application + */ + @Public + @Stable long getStartTime(); + + @Private + @Unstable void setStartTime(long startTime); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java index e8de1f3f62..46511ca0d2 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java @@ -22,68 +22,319 @@ import java.util.List; import java.util.Map; -public interface ApplicationSubmissionContext { - public abstract ApplicationId getApplicationId(); - public abstract String getApplicationName(); - public abstract Resource getMasterCapability(); - - public abstract Map getAllResources(); - public abstract URL getResource(String key); - - public abstract Map getAllResourcesTodo(); - public abstract LocalResource getResourceTodo(String key); - - public abstract List getFsTokenList(); - public abstract String getFsToken(int index); - public abstract int getFsTokenCount(); - - public abstract ByteBuffer getFsTokensTodo(); - - public abstract Map getAllEnvironment(); - public abstract String getEnvironment(String key); - - public abstract List getCommandList(); - public abstract String getCommand(int index); - public abstract int getCommandCount(); - - public abstract String getQueue(); - public abstract Priority getPriority(); - public abstract String getUser(); +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; +/** + *

+ * <p><code>ApplicationSubmissionContext</code> represents all of the
+ * information needed by the <code>ResourceManager</code> to launch
+ * the <code>ApplicationMaster</code> for an application.</p>
+ *
+ * <p>It includes details such as:
+ *   <ul>
+ *     <li>{@link ApplicationId} of the application.</li>
+ *     <li>
+ *       {@link Resource} necessary to run the <code>ApplicationMaster</code>.
+ *     </li>
+ *     <li>Application user.</li>
+ *     <li>Application name.</li>
+ *     <li>{@link Priority} of the application.</li>
+ *     <li>Security tokens (if security is enabled).</li>
+ *     <li>
+ *       {@link LocalResource} necessary for running the
+ *       <code>ApplicationMaster</code> container such
+ *       as binaries, jar, shared-objects, side-files etc.
+ *     </li>
+ *     <li>
+ *       Environment variables for the launched <code>ApplicationMaster</code>
+ *       process.
+ *     </li>
+ *     <li>Command to launch the <code>ApplicationMaster</code>.</li>
+ *   </ul>
+ * </p>

    + * + * @see ClientRMProtocol#submitApplication(org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest) + */ +@Public +@Stable +public interface ApplicationSubmissionContext { + /** + * Get the ApplicationId of the submitted application. + * @return ApplicationId of the submitted application + */ + @Public + @Stable + public ApplicationId getApplicationId(); + /** + * Set the ApplicationId of the submitted application. + * @param appplicationId ApplicationId of the submitted + * application + */ + @Public + @Stable + public void setApplicationId(ApplicationId appplicationId); + + /** + * Get the application name. + * @return application name + */ + @Public + @Stable + public String getApplicationName(); - public abstract void setApplicationId(ApplicationId appplicationId); - public abstract void setApplicationName(String applicationName); - public abstract void setMasterCapability(Resource masterCapability); + /** + * Set the application name. + * @param applicationName application name + */ + @Public + @Stable + public void setApplicationName(String applicationName); - public abstract void addAllResources(Map resources); - public abstract void setResource(String key, URL url); - public abstract void removeResource(String key); - public abstract void clearResources(); + /** + * Get the queue to which the application is being submitted. + * @return queue to which the application is being submitted + */ + @Public + @Stable + public String getQueue(); - public abstract void addAllResourcesTodo(Map resourcesTodo); - public abstract void setResourceTodo(String key, LocalResource localResource); - public abstract void removeResourceTodo(String key); - public abstract void clearResourcesTodo(); + /** + * Set the queue to which the application is being submitted + * @param queue queue to which the application is being submitted + */ + @Public + @Stable + public void setQueue(String queue); - public abstract void addAllFsTokens(List fsTokens); - public abstract void addFsToken(String fsToken); - public abstract void removeFsToken(int index); - public abstract void clearFsTokens(); + /** + * Get the Priority of the application. + * @return Priority of the application + */ + @Public + @Stable + public Priority getPriority(); + + /** + * Set the Priority of the application. + * @param priority Priority of the application + */ + @Public + @Stable + public void setPriority(Priority priority); - public abstract void setFsTokensTodo(ByteBuffer fsTokensTodo); + /** + * Get the user submitting the application. + * @return user submitting the application + */ + @Public + @Stable + public String getUser(); - public abstract void addAllEnvironment(Map environment); - public abstract void setEnvironment(String key, String env); - public abstract void removeEnvironment(String key); - public abstract void clearEnvironment(); + /** + * Set the user submitting the application. + * @param user user submitting the application + */ + @Public + @Stable + public void setUser(String user); - public abstract void addAllCommands(List commands); - public abstract void addCommand(String command); - public abstract void removeCommand(int index); - public abstract void clearCommands(); + /** + * Get the Resource required to run the + * ApplicationMaster. 
+ * @return Resource required to run the + * ApplicationMaster + */ + @Public + @Stable + public Resource getMasterCapability(); - public abstract void setQueue(String queue); - public abstract void setPriority(Priority priority); - public abstract void setUser(String user); + /** + * Set Resource required to run the + * ApplicationMaster. + * @param masterCapability Resource required to run the + * ApplicationMaster + */ + @Public + @Stable + public void setMasterCapability(Resource masterCapability); + + @Private + @Unstable + public Map getAllResources(); + + @Private + @Unstable + public URL getResource(String key); + + @Private + @Unstable + public void addAllResources(Map resources); + + @Private + @Unstable + public void setResource(String key, URL url); + + @Private + @Unstable + public void removeResource(String key); + + @Private + @Unstable + public void clearResources(); + + /** + * Get all the LocalResource required to run the + * ApplicationMaster. + * @return LocalResource required to run the + * ApplicationMaster + */ + @Public + @Stable + public Map getAllResourcesTodo(); + + @Private + @Unstable + public LocalResource getResourceTodo(String key); + + /** + * Add all the LocalResource required to run the + * ApplicationMaster. + * @param resources all LocalResource required to run the + * ApplicationMaster + */ + @Public + @Stable + public void addAllResourcesTodo(Map resources); + + @Private + @Unstable + public void setResourceTodo(String key, LocalResource localResource); + + @Private + @Unstable + public void removeResourceTodo(String key); + + @Private + @Unstable + public void clearResourcesTodo(); + + @Private + @Unstable + public List getFsTokenList(); + + @Private + @Unstable + public String getFsToken(int index); + + @Private + @Unstable + public int getFsTokenCount(); + + @Private + @Unstable + public void addAllFsTokens(List fsTokens); + + @Private + @Unstable + public void addFsToken(String fsToken); + + @Private + @Unstable + public void removeFsToken(int index); + + @Private + @Unstable + public void clearFsTokens(); + + /** + * Get file-system tokens for the ApplicationMaster. + * @return file-system tokens for the ApplicationMaster + */ + @Public + @Stable + public ByteBuffer getFsTokensTodo(); + + /** + * Set file-system tokens for the ApplicationMaster. + * @param fsTokens file-system tokens for the ApplicationMaster + */ + @Public + @Stable + public void setFsTokensTodo(ByteBuffer fsTokens); + + /** + * Get the environment variables for the + * ApplicationMaster. + * @return environment variables for the ApplicationMaster + */ + @Public + @Stable + public Map getAllEnvironment(); + + @Private + @Unstable + public String getEnvironment(String key); + + /** + * Add all of the environment variables for the + * ApplicationMaster. + * @param environment environment variables for the + * ApplicationMaster + */ + @Public + @Stable + public void addAllEnvironment(Map environment); + + @Private + @Unstable + public void setEnvironment(String key, String env); + + @Private + @Unstable + public void removeEnvironment(String key); + + @Private + @Unstable + public void clearEnvironment(); + + /** + * Get the commands to launch the ApplicationMaster. + * @return commands to launch the ApplicationMaster + */ + @Public + @Stable + public List getCommandList(); + + @Private + @Unstable + public String getCommand(int index); + + @Private + @Unstable + public int getCommandCount(); + + /** + * Add all of the commands to launch the + * ApplicationMaster. 
+ * @param commands commands to launch the ApplicationMaster + */ + @Public + @Stable + public void addAllCommands(List commands); + + @Private + @Unstable + public void addCommand(String command); + + @Private + @Unstable + public void removeCommand(int index); + + @Private + @Unstable + public void clearCommands(); } \ No newline at end of file diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerLaunchContext.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerLaunchContext.java index f9a7a17980..cf5c9d0d4e 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerLaunchContext.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerLaunchContext.java @@ -22,50 +22,233 @@ import java.util.List; import java.util.Map; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ContainerManager; + +/** + *

+ * <p><code>ContainerLaunchContext</code> represents all of the information
+ * needed by the <code>NodeManager</code> to launch a container.</p>
+ *
+ * <p>It includes details such as:
+ *   <ul>
+ *     <li>{@link ContainerId} of the container.</li>
+ *     <li>{@link Resource} allocated to the container.</li>
+ *     <li>User to whom the container is allocated.</li>
+ *     <li>Security tokens (if security is enabled).</li>
+ *     <li>
+ *       {@link LocalResource} necessary for running the container such
+ *       as binaries, jar, shared-objects, side-files etc.
+ *     </li>
+ *     <li>Optional, application-specific binary service data.</li>
+ *     <li>Environment variables for the launched process.</li>
+ *     <li>Command to launch the container.</li>
+ *   </ul>
+ * </p>

    + * + * @see ContainerManager#startContainer(org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest) + */ +@Public +@Stable public interface ContainerLaunchContext { + /** + * Get ContainerId of container to be launched. + * @return ContainerId of container to be launched + */ + @Public + @Stable ContainerId getContainerId(); + + /** + * Set ContainerId of container to be launched. + * @param containerId et ContainerId of container to be launched + */ + @Public + @Stable + void setContainerId(ContainerId containerId); + + /** + * Get the user to whom the container has been allocated. + * @return the user to whom the container has been allocated + */ + @Public + @Stable String getUser(); + + /** + * Set the user to whom the container has been allocated + * @param user user to whom the container has been allocated + */ + @Public + @Stable + void setUser(String user); + + /** + * Get the Resource allocated to the container by the + * ResourceManager. + * @return Resource allocated to the container by the + * ResourceManager + */ + @Public + @Stable Resource getResource(); - - Map getAllLocalResources(); - LocalResource getLocalResource(String key); - - + + /** + * Set the Resource allocated to the container by the + * ResourceManager. + * @param resource allocated resource + */ + @Public + @Stable + void setResource(Resource resource); + + /** + * Get security tokens (if security is enabled). + * @return security tokens (if security is enabled) + */ + @Public + @Stable ByteBuffer getContainerTokens(); + + /** + * Set security tokens (if security is enabled). + * @param containerToken security tokens + */ + @Public + @Stable + void setContainerTokens(ByteBuffer containerToken); + + /** + * Get all LocalResource required by the container. + * @return all LocalResource required by the container + */ + @Public + @Stable + Map getAllLocalResources(); + @Private + @Unstable + LocalResource getLocalResource(String key); + + /** + * Add all LocalResource required by the container. + * @param localResources LocalResource required by the container + */ + @Public + @Stable + void addAllLocalResources(Map localResources); + + @Private + @Unstable + void setLocalResource(String key, LocalResource value); + + @Private + @Unstable + void removeLocalResource(String key); + + @Private + @Unstable + void clearLocalResources(); + + /** + * Get application-specific binary service data. + * @return application-specific binary service data + */ + @Public + @Stable Map getAllServiceData(); - ByteBuffer getServiceData(String key); + @Private + @Unstable + ByteBuffer getServiceData(String key); + + /** + * Add add application-specific binary service data. + * @param serviceData application-specific binary service data + */ + @Public + @Stable + void addAllServiceData(Map serviceData); + + @Private + @Unstable + void setServiceData(String key, ByteBuffer value); + + @Private + @Unstable + void removeServiceData(String key); + + @Private + @Unstable + void clearServiceData(); + + /** + * Get environment variables for the launched container. + * @return environment variables for the launched container + */ + @Public + @Stable Map getAllEnv(); + + @Private + @Unstable String getEnv(String key); + /** + * Add environment variables for the launched container. 
+ * @param env environment variables for the launched container + */ + @Public + @Stable + void addAllEnv(Map env); + + @Private + @Unstable + void setEnv(String key, String value); + + @Private + @Unstable + void removeEnv(String key); + + @Private + @Unstable + void clearEnv(); + + /** + * Get the list of commands for launching the container. + * @return the list of commands for launching the container + */ + @Public + @Stable List getCommandList(); + + @Private + @Unstable String getCommand(int index); + + @Private + @Unstable int getCommandCount(); - void setContainerId(ContainerId containerId); - void setUser(String user); - void setResource(Resource resource); - - void addAllLocalResources(Map localResources); - void setLocalResource(String key, LocalResource value); - void removeLocalResource(String key); - void clearLocalResources(); - - void setContainerTokens(ByteBuffer containerToken); - - void addAllServiceData(Map serviceData); - void setServiceData(String key, ByteBuffer value); - void removeServiceData(String key); - void clearServiceData(); - - void addAllEnv(Map env); - void setEnv(String key, String value); - void removeEnv(String key); - void clearEnv(); - + /** + * Add the list of commands for launching the container. + * @param commands the list of commands for launching the container + */ + @Public + @Stable void addAllCommands(List commands); + + @Private + @Unstable void addCommand(String command); + + @Private + @Unstable void removeCommand(int index); + + @Private + @Unstable void clearCommands(); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResource.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResource.java index 0419a03f4d..10dd23a3fa 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResource.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResource.java @@ -18,16 +18,92 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ContainerManager; + +/** + *

+ * <p><code>LocalResource</code> represents a local resource required to
+ * run a container.</p>
+ *
+ * <p>The <code>NodeManager</code> is responsible for localizing the resource
+ * prior to launching the container.</p>
+ *
+ * <p>Applications can specify {@link LocalResourceType} and
+ * {@link LocalResourceVisibility}.</p>
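For illustration only: a sketch of describing a jar that already sits on HDFS as a LocalResource, assuming a ConverterUtils.getYarnUrlFromPath helper in yarn-common is available to build the URL record; the helper class and path handling are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.util.ConverterUtils;

public class LocalResourceSketch {
  // Hypothetical helper: describes a jar already on HDFS so the NodeManager
  // can localize it before launching the container.
  public static LocalResource forJar(Configuration conf, Path jarOnHdfs)
      throws Exception {
    FileSystem fs = jarOnHdfs.getFileSystem(conf);
    FileStatus status = fs.getFileStatus(jarOnHdfs);

    RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(conf);
    LocalResource resource =
        recordFactory.newRecordInstance(LocalResource.class);
    resource.setResource(ConverterUtils.getYarnUrlFromPath(jarOnHdfs)); // assumed helper
    resource.setSize(status.getLen());              // used by the NM for verification
    resource.setTimestamp(status.getModificationTime());
    resource.setType(LocalResourceType.FILE);       // plain file, not unarchived
    resource.setVisibility(LocalResourceVisibility.APPLICATION);
    return resource;
  }
}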

    + * + * @see LocalResourceType + * @see LocalResourceVisibility + * @see ContainerLaunchContext + * @see ApplicationSubmissionContext + * @see ContainerManager#startContainer(org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest) + */ +@Public +@Stable public interface LocalResource { - public abstract URL getResource(); - public abstract long getSize(); - public abstract long getTimestamp(); - public abstract LocalResourceType getType(); - public abstract LocalResourceVisibility getVisibility(); + /** + * Get the location of the resource to be localized. + * @return location of the resource to be localized + */ + public URL getResource(); - public abstract void setResource(URL resource); - public abstract void setSize(long size); - public abstract void setTimestamp(long timestamp); - public abstract void setType(LocalResourceType type); - public abstract void setVisibility(LocalResourceVisibility visibility); + /** + * Set location of the resource to be localized. + * @param resource location of the resource to be localized + */ + public void setResource(URL resource); + + /** + * Get the size of the resource to be localized. + * @return size of the resource to be localized + */ + public long getSize(); + + /** + * Set the size of the resource to be localized. + * @param size size of the resource to be localized + */ + public void setSize(long size); + + /** + * Get the original timestamp of the resource to be localized, used + * for verification. + * @return timestamp of the resource to be localized + */ + public long getTimestamp(); + + /** + * Set the timestamp of the resource to be localized, used + * for verification. + * @param timestamp timestamp of the resource to be localized + */ + public void setTimestamp(long timestamp); + + /** + * Get the LocalResourceType of the resource to be localized. + * @return LocalResourceType of the resource to be localized + */ + public LocalResourceType getType(); + + /** + * Set the LocalResourceType of the resource to be localized. + * @param type LocalResourceType of the resource to be localized + */ + public void setType(LocalResourceType type); + + /** + * Get the LocalResourceVisibility of the resource to be + * localized. + * @return LocalResourceVisibility of the resource to be + * localized + */ + public LocalResourceVisibility getVisibility(); + + /** + * Set the LocalResourceVisibility of the resource to be + * localized. + * @param visibility LocalResourceVisibility of the resource to be + * localized + */ + public void setVisibility(LocalResourceVisibility visibility); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceType.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceType.java index 72a1dd04d2..0cfed1c4d2 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceType.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceType.java @@ -18,6 +18,42 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ContainerManager; + +/** + *

+ * <p><code>LocalResourceType</code> specifies the type
+ * of a resource localized by the <code>NodeManager</code>.</p>
+ *
+ * <p>The type can be one of:
+ *   <ul>
+ *     <li>
+ *       {@link #FILE} - Regular file i.e. uninterpreted bytes.
+ *     </li>
+ *     <li>
+ *       {@link #ARCHIVE} - Archive, which is automatically unarchived by the
+ *       <code>NodeManager</code>.
+ *     </li>
+ *   </ul>
+ * </p>

    + * + * @see LocalResource + * @see ContainerLaunchContext + * @see ApplicationSubmissionContext + * @see ContainerManager#startContainer(org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest) + */ +@Public +@Stable public enum LocalResourceType { - ARCHIVE, FILE + + /** + * Archive, which is automatically unarchived by the NodeManager. + */ + ARCHIVE, + + /** + * Regular file i.e. uninterpreted bytes. + */ + FILE } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceVisibility.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceVisibility.java index 45932131e6..232ab0a353 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceVisibility.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceVisibility.java @@ -18,6 +18,48 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ContainerManager; + +/** + *

+ * <p><code>LocalResourceVisibility</code> specifies the visibility
+ * of a resource localized by the <code>NodeManager</code>.</p>
+ *
+ * <p>The visibility can be one of:
+ *   <ul>
+ *     <li>{@link #PUBLIC} - Shared by all users on the node.</li>
+ *     <li>
+ *       {@link #PRIVATE} - Shared among all applications of the
+ *       same user on the node.
+ *     </li>
+ *     <li>
+ *       {@link #APPLICATION} - Shared only among containers of the
+ *       same application on the node.
+ *     </li>
+ *   </ul>
+ * </p>

    + * + * @see LocalResource + * @see ContainerLaunchContext + * @see ApplicationSubmissionContext + * @see ContainerManager#startContainer(org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest) + */ +@Public +@Stable public enum LocalResourceVisibility { - PUBLIC, PRIVATE, APPLICATION + /** + * Shared by all users on the node. + */ + PUBLIC, + + /** + * Shared among all applications of the same user on the node. + */ + PRIVATE, + + /** + * Shared only among containers of the same application on the node. + */ + APPLICATION } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeHealthStatus.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeHealthStatus.java index 01f5f6017b..c507e45b1a 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeHealthStatus.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeHealthStatus.java @@ -17,17 +17,69 @@ */ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; + +/** + *

+ * <p><code>NodeHealthStatus</code> is a summary of the health status of the
+ * node.</p>
+ *
+ * <p>It includes information such as:
+ *   <ul>
+ *     <li>
+ *       An indicator of whether the node is healthy, as determined by the
+ *       health-check script.
+ *     </li>
+ *     <li>The previous time at which the health status was reported.</li>
+ *     <li>A diagnostic report on the health status.</li>
+ *   </ul>
+ * </p>

    + * + * @see NodeReport + * @see ClientRMProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest) + */ +@Public +@Stable public interface NodeHealthStatus { + /** + * Is the node healthy? + * @return true if the node is healthy, else false + */ + @Public + @Stable boolean getIsNodeHealthy(); - String getHealthReport(); - - long getLastHealthReportTime(); - + @Private + @Unstable void setIsNodeHealthy(boolean isNodeHealthy); + /** + * Get the diagnostic health report of the node. + * @return diagnostic health report of the node + */ + @Public + @Stable + String getHealthReport(); + + @Private + @Unstable void setHealthReport(String healthReport); + /** + * Get the last timestamp at which the health report was received. + * @return last timestamp at which the health report was received + */ + @Public + @Stable + long getLastHealthReportTime(); + + @Private + @Unstable void setLastHealthReportTime(long lastHealthReport); } \ No newline at end of file diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java index 7e333e909b..8241db6b1b 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java @@ -18,19 +18,113 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; + +/** + *

+ * <p><code>NodeReport</code> is a summary of runtime information of a
+ * node in the cluster.</p>
+ *
+ * <p>It includes details such as:
+ *   <ul>
+ *     <li>{@link NodeId} of the node.</li>
+ *     <li>HTTP Tracking URL of the node.</li>
+ *     <li>Rack name for the node.</li>
+ *     <li>Used {@link Resource} on the node.</li>
+ *     <li>Total available {@link Resource} of the node.</li>
+ *     <li>Number of running containers on the node.</li>
+ *     <li>{@link NodeHealthStatus} of the node.</li>
+ *   </ul>
+ * </p>

    + * + * @see NodeHealthStatus + * @see ClientRMProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest) + */ +@Public +@Stable public interface NodeReport { + /** + * Get the NodeId of the node. + * @return NodeId of the node + */ NodeId getNodeId(); + + @Private + @Unstable void setNodeId(NodeId nodeId); + + /** + * Get the http address of the node. + * @return http address of the node + */ + @Public + @Stable String getHttpAddress(); + + @Private + @Unstable void setHttpAddress(String httpAddress); + + /** + * Get the rack name for the node. + * @return rack name for the node + */ + @Public + @Stable String getRackName(); + + @Private + @Unstable void setRackName(String rackName); + + /** + * Get used Resource on the node. + * @return used Resource on the node + */ + @Public + @Stable Resource getUsed(); + + @Private + @Unstable void setUsed(Resource used); + + /** + * Get the total Resource on the node. + * @return total Resource on the node + */ + @Public + @Stable Resource getCapability(); + + @Private + @Unstable void setCapability(Resource capability); + + /** + * Get the number of running containers on the node. + * @return number of running containers on the node + */ + @Public + @Stable int getNumContainers(); + + @Private + @Unstable void setNumContainers(int numContainers); + + /** + * Get the NodeHealthStatus of the node. + * @return NodeHealthStatus of the node + */ + @Public + @Stable NodeHealthStatus getNodeHealthStatus(); + + @Private + @Unstable void setNodeHealthStatus(NodeHealthStatus nodeHealthStatus); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueACL.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueACL.java index 826222880a..eab91fa3a8 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueACL.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueACL.java @@ -18,8 +18,39 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; + +/** + *

+ * <p><code>QueueACL</code> enumerates the various ACLs for queues.</p>
+ *
+ * <p>The ACLs are one of:
+ *   <ul>
+ *     <li>{@link #SUBMIT_JOB} - ACL to submit jobs to the queue.</li>
+ *     <li>{@link #ADMINISTER_QUEUE} - ACL to administer the queue.</li>
+ *     <li>{@link #ADMINISTER_JOBS} - ACL to administer jobs in the queue.</li>
+ *   </ul>
+ * </p>

    + * + * @see QueueInfo + * @see ClientRMProtocol#getQueueUserAcls(org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest) + */ +@Public +@Stable public enum QueueACL { + /** + * ACL to submit jobs to the queue. + */ SUBMIT_JOB, + + /** + * ACL to administer the queue. + */ ADMINISTER_QUEUE, + + /** + * ACL to administer jobs in the queue. + */ ADMINISTER_JOBS; // currently unused } \ No newline at end of file diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java index 85c51e9fda..a783c3ced8 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java @@ -20,25 +20,114 @@ import java.util.List; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; + +/** + *

+ * <p><code>QueueInfo</code> is a report of the runtime information of the
+ * queue.</p>
+ *
+ * <p>It includes information such as:
+ *   <ul>
+ *     <li>Queue name.</li>
+ *     <li>Capacity of the queue.</li>
+ *     <li>Maximum capacity of the queue.</li>
+ *     <li>Current capacity of the queue.</li>
+ *     <li>Child queues.</li>
+ *     <li>Running applications.</li>
+ *     <li>{@link QueueState} of the queue.</li>
+ *   </ul>
+ * </p>
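For illustration only: a tiny sketch that walks a queue hierarchy and prints each queue, using only the getters declared below; the helper class and the null guard are hypothetical.

import org.apache.hadoop.yarn.api.records.QueueInfo;

public class QueueInfoSketch {
  // Hypothetical helper: recursively prints a queue and its children.
  public static void print(QueueInfo queue, String indent) {
    System.out.println(indent + queue.getQueueName()
        + " state=" + queue.getQueueState()
        + " capacity=" + queue.getCapacity()
        + " max=" + queue.getMaximumCapacity()
        + " current=" + queue.getCurrentCapacity()
        + " apps=" + queue.getApplications().size());
    if (queue.getChildQueues() != null) {
      for (QueueInfo child : queue.getChildQueues()) {
        print(child, indent + "  ");
      }
    }
  }
}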

    + * + * @see QueueState + * @see ClientRMProtocol#getQueueInfo(org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest) + */ +@Public +@Stable public interface QueueInfo { + /** + * Get the name of the queue. + * @return name of the queue + */ + @Public + @Stable String getQueueName(); + + @Private + @Unstable void setQueueName(String queueName); + /** + * Get the configured capacity of the queue. + * @return configured capacity of the queue + */ + @Public + @Stable float getCapacity(); + + @Private + @Unstable void setCapacity(float capacity); + /** + * Get the maximum capacity of the queue. + * @return maximum capacity of the queue + */ + @Public + @Stable float getMaximumCapacity(); + + @Private + @Unstable void setMaximumCapacity(float maximumCapacity); + /** + * Get the current capacity of the queue. + * @return current capacity of the queue + */ + @Public + @Stable float getCurrentCapacity(); + + @Private + @Unstable void setCurrentCapacity(float currentCapacity); + /** + * Get the child queues of the queue. + * @return child queues of the queue + */ + @Public + @Stable List getChildQueues(); + + @Private + @Unstable void setChildQueues(List childQueues); + /** + * Get the running applications of the queue. + * @return running applications of the queue + */ + @Public + @Stable List getApplications(); + + @Private + @Unstable void setApplications(List applications); + /** + * Get the QueueState of the queue. + * @return QueueState of the queue + */ + @Public + @Stable QueueState getQueueState(); + + @Private + @Unstable void setQueueState(QueueState queueState); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueState.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueState.java index 32696b6663..3d01966935 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueState.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueState.java @@ -18,10 +18,33 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; + /** - * State of a Queue + *
<p>State of a Queue.</p>
+ *
+ * <p>A queue is one of:
+ *   <ul>
+ *     <li>{@link #RUNNING} - normal state.</li>
+ *     <li>{@link #STOPPED} - not accepting new application submissions.</li>
+ *   </ul>
+ * </p>
    + * + * @see QueueInfo + * @see ClientRMProtocol#getQueueInfo(org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest) */ +@Public +@Stable public enum QueueState { - STOPPED, + /** + * Stopped - Not accepting submissions of new applications. + */ + STOPPED, + + /** + * Running - normal operation. + */ RUNNING } \ No newline at end of file diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueUserACLInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueUserACLInfo.java index b72159acac..704cc16e90 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueUserACLInfo.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueUserACLInfo.java @@ -20,10 +20,43 @@ import java.util.List; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; + +/** + *
<p><code>QueueUserACLInfo</code> provides information {@link QueueACL} for
+ * the given user.</p>
    + * + * @see QueueACL + * @see ClientRMProtocol#getQueueUserAcls(org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest) + */ +@Public +@Stable public interface QueueUserACLInfo { + /** + * Get the queue name of the queue. + * @return queue name of the queue + */ + @Public + @Stable String getQueueName(); - void setQueueName(String queueName); + @Private + @Unstable + void setQueueName(String queueName); + + /** + * Get the list of QueueACL for the given user. + * @return list of QueueACL for the given user + */ + @Public + @Stable List getUserAcls(); + + @Private + @Unstable void setUserAcls(List acls); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java index 00b2630e46..a4e2d49d96 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java @@ -29,8 +29,6 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProtoOrBuilder; -import org.mortbay.log.Log; - public class ContainerIdPBImpl extends ProtoBase implements ContainerId { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto index 2c07abeb18..753c6b8c9a 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto @@ -143,6 +143,7 @@ message StartContainerRequestProto { } message StartContainerResponseProto { + repeated StringBytesMapProto service_response = 1; } message StopContainerRequestProto { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java index 399a275e5d..b6f96597e4 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java @@ -317,15 +317,15 @@ public Server(Object instance, Configuration conf, String bindAddress, } @Override - public Writable call(Class protocol, Writable writableRequest, + public Writable call(String protocol, Writable writableRequest, long receiveTime) throws IOException { ProtoSpecificRequestWritable request = (ProtoSpecificRequestWritable) writableRequest; ProtoSpecificRpcRequest rpcRequest = request.message; String methodName = rpcRequest.getMethodName(); - System.out.println("Call: protocol=" + protocol.getCanonicalName() + ", method=" + System.out.println("Call: protocol=" + protocol + ", method=" + methodName); if (verbose) - log("Call: protocol=" + protocol.getCanonicalName() + ", method=" + log("Call: protocol=" + protocol + ", method=" + methodName); MethodDescriptor 
methodDescriptor = service.getDescriptorForType() .findMethodByName(methodName); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java index 92dc7affc5..0d02cb96b4 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java @@ -50,7 +50,7 @@ public class ConverterUtils { * * @param url * url to convert - * @return + * @return path from {@link URL} * @throws URISyntaxException */ public static Path getPathFromYarnURL(URL url) throws URISyntaxException { @@ -63,8 +63,8 @@ public static Path getPathFromYarnURL(URL url) throws URISyntaxException { /** * change from CharSequence to string for map key and value - * @param env - * @return + * @param env map for converting + * @return string,string map */ public static Map convertToString( Map env) { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java index a934f0c4c4..db5f532987 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java @@ -221,8 +221,7 @@ public ProcfsBasedProcessTree getProcessTree() { } /** Verify that the given process id is same as its process group id. - * @param pidStr Process id of the to-be-verified-process - * @param procfsDir Procfs root dir + * @return true if the process id matches else return false. */ public boolean checkPidPgrpidForMatch() { return checkPidPgrpidForMatch(pid, PROCFS); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java index fe6471d203..4b70afe74e 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java @@ -62,7 +62,7 @@ public synchronized static void init(Configuration conf) { * right resolver implementation. * @param conf * @param hostName - * @return + * @return node {@link Node} after resolving the hostname */ public static Node resolve(Configuration conf, String hostName) { init(conf); @@ -74,7 +74,7 @@ public static Node resolve(Configuration conf, String hostName) { * network topology. This method doesn't initialize the class. * Call {@link #init(Configuration)} explicitly. 
* @param hostName - * @return + * @return node {@link Node} after resolving the hostname */ public static Node resolve(String hostName) { if (!initCalled) { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java index 5e2de2b226..25b26f4798 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java @@ -77,11 +77,18 @@ public abstract void startLocalizer(Path nmPrivateContainerTokens, List localDirs) throws IOException, InterruptedException; + /** * Launch the container on the node. This is a blocking call and returns only * when the container exits. - * - * @param launchCtxt + * @param container the container to be launched + * @param nmPrivateContainerScriptPath the path for launch script + * @param nmPrivateTokensPath the path for tokens for the container + * @param user the user of the container + * @param appId the appId of the container + * @param containerWorkDir the work dir for the container + * @return the return status of the launch + * @throws IOException */ public abstract int launchContainer(Container container, Path nmPrivateContainerScriptPath, Path nmPrivateTokensPath, diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java index f62a5cb989..60206e0d1b 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java @@ -60,11 +60,14 @@ public DeletionService(ContainerExecutor exec) { this.exec = exec; this.debugDelay = 0; } - + + /** + * /** * Delete the path(s) as this user. * @param user The user to delete as, or the JVM user if null - * @param p Paths to delete + * @param subDir the sub directory name + * @param baseDirs the base directories which contains the subDir's */ public void delete(String user, Path subDir, Path... 
baseDirs) { // TODO if parent owned by NM, rename within parent inline diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NMAuditLogger.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NMAuditLogger.java new file mode 100644 index 0000000000..cb4021f8ee --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NMAuditLogger.java @@ -0,0 +1,201 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.nodemanager; + +import java.net.InetAddress; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; + +/** + * Manages NodeManager audit logs. + * + * Audit log format is written as key=value pairs. Tab separated. + */ +public class NMAuditLogger { + private static final Log LOG = LogFactory.getLog(NMAuditLogger.class); + + static enum Keys {USER, OPERATION, TARGET, RESULT, IP, + DESCRIPTION, APPID, CONTAINERID} + + public static class AuditConstants { + static final String SUCCESS = "SUCCESS"; + static final String FAILURE = "FAILURE"; + static final String KEY_VAL_SEPARATOR = "="; + static final char PAIR_SEPARATOR = '\t'; + + // Some commonly used descriptions + public static final String START_CONTAINER = "Start Container Request"; + public static final String STOP_CONTAINER = "Stop Container Request"; + public static final String FINISH_SUCCESS_CONTAINER = "Container Finished - Succeeded"; + public static final String FINISH_FAILED_CONTAINER = "Container Finished - Failed"; + public static final String FINISH_KILLED_CONTAINER = "Container Finished - Killed"; + } + + /** + * A helper api for creating an audit log for a successful event. + */ + static String createSuccessLog(String user, String operation, String target, + ApplicationId appId, ContainerId containerId) { + StringBuilder b = new StringBuilder(); + start(Keys.USER, user, b); + addRemoteIP(b); + add(Keys.OPERATION, operation, b); + add(Keys.TARGET, target ,b); + add(Keys.RESULT, AuditConstants.SUCCESS, b); + if (appId != null) { + add(Keys.APPID, appId.toString(), b); + } + if (containerId != null) { + add(Keys.CONTAINERID, containerId.toString(), b); + } + return b.toString(); + } + + /** + * Create a readable and parseable audit log string for a successful event. + * + * @param user User who made the service request. 
+ * @param operation Operation requested by the user + * @param target The target on which the operation is being performed. + * @param appId Application Id in which operation was performed. + * @param containerId Container Id in which operation was performed. + * + *
<br><br>
    + * Note that the {@link NMAuditLogger} uses tabs ('\t') as a key-val delimiter + * and hence the value fields should not contains tabs ('\t'). + */ + public static void logSuccess(String user, String operation, String target, + ApplicationId appId, ContainerId containerId) { + if (LOG.isInfoEnabled()) { + LOG.info(createSuccessLog(user, operation, target, appId, containerId)); + } + } + + /** + * Create a readable and parseable audit log string for a successful event. + * + * @param user User who made the service request. + * @param operation Operation requested by the user + * @param target The target on which the operation is being performed. + * + *
<br><br>
    + * Note that the {@link NMAuditLogger} uses tabs ('\t') as a key-val delimiter + * and hence the value fields should not contains tabs ('\t'). + */ + public static void logSuccess(String user, String operation, String target) { + if (LOG.isInfoEnabled()) { + LOG.info(createSuccessLog(user, operation, target, null, null)); + } + } + + /** + * A helper api for creating an audit log for a failure event. + * This is factored out for testing purpose. + */ + static String createFailureLog(String user, String operation, String target, + String description, ApplicationId appId, ContainerId containerId) { + StringBuilder b = new StringBuilder(); + start(Keys.USER, user, b); + addRemoteIP(b); + add(Keys.OPERATION, operation, b); + add(Keys.TARGET, target ,b); + add(Keys.RESULT, AuditConstants.FAILURE, b); + add(Keys.DESCRIPTION, description, b); + if (appId != null) { + add(Keys.APPID, appId.toString(), b); + } + if (containerId != null) { + add(Keys.CONTAINERID, containerId.toString(), b); + } + return b.toString(); + } + + /** + * Create a readable and parseable audit log string for a failed event. + * + * @param user User who made the service request. + * @param operation Operation requested by the user. + * @param target The target on which the operation is being performed. + * @param description Some additional information as to why the operation + * failed. + * @param appId ApplicationId in which operation was performed. + * @param containerId Container Id in which operation was performed. + * + *
<br><br>
    + * Note that the {@link NMAuditLogger} uses tabs ('\t') as a key-val delimiter + * and hence the value fields should not contains tabs ('\t'). + */ + public static void logFailure(String user, String operation, String target, + String description, ApplicationId appId, ContainerId containerId) { + if (LOG.isWarnEnabled()) { + LOG.warn(createFailureLog(user, operation, target, description, appId, containerId)); + } + } + + /** + * Create a readable and parseable audit log string for a failed event. + * + * @param user User who made the service request. + * @param operation Operation requested by the user. + * @param target The target on which the operation is being performed. + * @param description Some additional information as to why the operation + * failed. + * + *
<br><br>
    + * Note that the {@link NMAuditLogger} uses tabs ('\t') as a key-val delimiter + * and hence the value fields should not contains tabs ('\t'). + */ + public static void logFailure(String user, String operation, + String target, String description) { + if (LOG.isWarnEnabled()) { + LOG.warn(createFailureLog(user, operation, target, description, null, null)); + } + } + + /** + * A helper api to add remote IP address + */ + static void addRemoteIP(StringBuilder b) { + InetAddress ip = Server.getRemoteIp(); + // ip address can be null for testcases + if (ip != null) { + add(Keys.IP, ip.getHostAddress(), b); + } + } + + /** + * Adds the first key-val pair to the passed builder in the following format + * key=value + */ + static void start(Keys key, String value, StringBuilder b) { + b.append(key.name()).append(AuditConstants.KEY_VAL_SEPARATOR).append(value); + } + + /** + * Appends the key-val pair to the passed builder in the following format + * key=value + */ + static void add(Keys key, String value, StringBuilder b) { + b.append(AuditConstants.PAIR_SEPARATOR).append(key.name()) + .append(AuditConstants.KEY_VAL_SEPARATOR).append(value); + } +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java index d02c30d4db..b826042278 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java @@ -26,11 +26,14 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentSkipListMap; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.NodeHealthCheckerService; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -49,6 +52,7 @@ import org.apache.hadoop.yarn.service.Service; public class NodeManager extends CompositeService { + private static final Log LOG = LogFactory.getLog(NodeManager.class); protected final NodeManagerMetrics metrics = NodeManagerMetrics.create(); public NodeManager() { @@ -185,6 +189,7 @@ public NodeHealthStatus getNodeHealthStatus() { } public static void main(String[] args) { + StringUtils.startupShutdownMessage(NodeManager.class, args, LOG); NodeManager nodeManager = new NodeManager(); YarnConfiguration conf = new YarnConfiguration(); nodeManager.init(conf); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java index 4abf4a6a3e..254ff2a671 100644 --- 
a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java @@ -44,11 +44,14 @@ public class AuxServices extends AbstractService public static final String AUX_SERVICE_CLASS_FMT = "nodemanager.aux.service.%s.class"; public final Map serviceMap; + public final Map serviceMeta; public AuxServices() { super(AuxServices.class.getName()); serviceMap = Collections.synchronizedMap(new HashMap()); + serviceMeta = + Collections.synchronizedMap(new HashMap()); // Obtain services from configuration in init() } @@ -63,6 +66,15 @@ Collection getServices() { return Collections.unmodifiableCollection(serviceMap.values()); } + /** + * @return the meta data for all registered services, that have been started. + * If a service has not been started no metadata will be available. The key + * the the name of the service as defined in the configuration. + */ + public Map getMeta() { + return Collections.unmodifiableMap(serviceMeta); + } + @Override public void init(Configuration conf) { Collection auxNames = conf.getStringCollection(AUX_SERVICES); @@ -75,7 +87,15 @@ public void init(Configuration conf) { throw new RuntimeException("No class defiend for " + sName); } AuxiliaryService s = ReflectionUtils.newInstance(sClass, conf); - // TODO better use use s.getName()? + // TODO better use s.getName()? + if(!sName.equals(s.getName())) { + LOG.warn("The Auxilurary Service named '"+sName+"' in the " + +"configuration is for class "+sClass+" which has " + +"a name of '"+s.getName()+"'. Because these are " + +"not the same tools trying to send ServiceData and read " + +"Service Meta Data may have issues unless the refer to " + +"the name in the config."); + } addService(sName, s); s.init(conf); } catch (RuntimeException e) { @@ -90,9 +110,15 @@ public void init(Configuration conf) { public void start() { // TODO fork(?) services running as configured user // monitor for health, shutdown/restart(?) if any should die - for (Service service : serviceMap.values()) { + for (Map.Entry entry : serviceMap.entrySet()) { + AuxiliaryService service = entry.getValue(); + String name = entry.getKey(); service.start(); service.register(this); + ByteBuffer meta = service.getMeta(); + if(meta != null) { + serviceMeta.put(name, meta); + } } super.start(); } @@ -108,6 +134,7 @@ public void stop() { } } serviceMap.clear(); + serviceMeta.clear(); } } finally { super.stop(); @@ -146,6 +173,15 @@ public void handle(AuxServicesEvent event) { public interface AuxiliaryService extends Service { void initApp(String user, ApplicationId appId, ByteBuffer data); void stopApp(ApplicationId appId); + /** + * Retreive metadata for this service. This is likely going to be contact + * information so that applications can access the service remotely. Ideally + * each service should provide a method to parse out the information to a usable + * class. This will only be called after the services start method has finished. + * the result may be cached. + * @return metadata for this service that should be made avaiable to applications. 
+ */ + ByteBuffer getMeta(); } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index c4bf2c299a..49bf3f0ef0 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -65,6 +65,8 @@ import org.apache.hadoop.yarn.server.nodemanager.ContainerManagerEvent; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.DeletionService; +import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger; +import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger.AuditConstants; import org.apache.hadoop.yarn.server.nodemanager.NMConfig; import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdater; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; @@ -266,6 +268,10 @@ public StartContainerResponse startContainer(StartContainerRequest request) ContainerId containerID = launchContext.getContainerId(); ApplicationId applicationID = containerID.getAppId(); if (context.getContainers().putIfAbsent(containerID, container) != null) { + NMAuditLogger.logFailure(launchContext.getUser(), + AuditConstants.START_CONTAINER, "ContainerManagerImpl", + "Container already running on this node!", + applicationID, containerID); throw RPCUtil.getRemoteException("Container " + containerID + " already is running on this node!!"); } @@ -281,8 +287,14 @@ public StartContainerResponse startContainer(StartContainerRequest request) // TODO: Validate the request dispatcher.getEventHandler().handle(new ApplicationInitEvent(container)); + + NMAuditLogger.logSuccess(launchContext.getUser(), + AuditConstants.START_CONTAINER, "ContainerManageImpl", + applicationID, containerID); + StartContainerResponse response = recordFactory.newRecordInstance(StartContainerResponse.class); + response.addAllServiceResponse(auxiluaryServices.getMeta()); metrics.launchedContainer(); metrics.allocateContainer(launchContext.getResource()); return response; @@ -299,12 +311,23 @@ public StopContainerResponse stopContainer(StopContainerRequest request) Container container = this.context.getContainers().get(containerID); if (container == null) { LOG.warn("Trying to stop unknown container " + containerID); + NMAuditLogger.logFailure(container.getUser(), + AuditConstants.STOP_CONTAINER, "ContainerManagerImpl", + "Trying to stop unknown container!", + containerID.getAppId(), containerID); return response; // Return immediately. } dispatcher.getEventHandler().handle( new ContainerKillEvent(containerID, "Container killed by the ApplicationMaster.")); + // user logged here not ideal since just getting user from container but + // request doesn't have anything and should be coming from user of AM so + // should be the same or should be rejected by auth before here. 
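[Editor's illustration, not part of the patch] The AuxiliaryService.getMeta() hook added above lets a node-manager auxiliary service publish metadata that AuxServices collects after start() and that startContainer() returns to clients via StartContainerResponse.addAllServiceResponse(...). A minimal sketch of such a service, modeled on the LightService used in TestAuxServices further below, might look as follows; the class name, service name, and payload are hypothetical:

import java.nio.ByteBuffer;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServices;
import org.apache.hadoop.yarn.service.AbstractService;

public class EchoAuxService extends AbstractService
    implements AuxServices.AuxiliaryService {

  private ByteBuffer meta = null;

  public EchoAuxService() {
    super("echo");
  }

  @Override
  public void start() {
    // Publish contact information (e.g. the port this service bound to) so
    // applications can reach the service from their containers. AuxServices
    // reads this buffer once start() has finished.
    meta = ByteBuffer.wrap("echo:8021".getBytes());
    super.start();
  }

  @Override
  public void initApp(String user, ApplicationId appId, ByteBuffer data) {
    // No per-application state in this sketch.
  }

  @Override
  public void stopApp(ApplicationId appId) {
    // Nothing to clean up in this sketch.
  }

  @Override
  public ByteBuffer getMeta() {
    // Called by AuxServices.start(); the result may be cached.
    return meta;
  }
}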
+ NMAuditLogger.logSuccess(container.getUser(), + AuditConstants.STOP_CONTAINER, "ContainerManageImpl", + containerID.getAppId(), containerID); + // TODO: Move this code to appropriate place once kill_container is // implemented. nodeStatusUpdater.sendOutofBandHeartBeat(); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java index 660311c34b..8a4439d32b 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java @@ -42,6 +42,8 @@ import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger; +import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger.AuditConstants; import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationContainerFinishedEvent; @@ -365,18 +367,28 @@ private void finished() { case EXITED_WITH_SUCCESS: metrics.endRunningContainer(); metrics.completedContainer(); + NMAuditLogger.logSuccess(getUser(), + AuditConstants.FINISH_SUCCESS_CONTAINER, "ContainerImpl", + getContainerID().getAppId(), getContainerID()); break; case EXITED_WITH_FAILURE: metrics.endRunningContainer(); // fall through case LOCALIZATION_FAILED: metrics.failedContainer(); + NMAuditLogger.logFailure(getUser(), + AuditConstants.FINISH_FAILED_CONTAINER, "ContainerImpl", + "Container failed with state: " + getContainerState(), + getContainerID().getAppId(), getContainerID()); break; case CONTAINER_CLEANEDUP_AFTER_KILL: metrics.endRunningContainer(); // fall through case NEW: metrics.killedContainer(); + NMAuditLogger.logSuccess(getUser(), + AuditConstants.FINISH_KILLED_CONTAINER, "ContainerImpl", + getContainerID().getAppId(), getContainerID()); } metrics.releaseContainer(getLaunchContext().getResource()); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java index bcd2115d14..585ec852f3 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java @@ -100,7 +100,8 @@ public Integer call() { String 
appIdStr = app.toString(); Path containerLogDir = this.logDirsSelector.getLocalPathForWrite(appIdStr + Path.SEPARATOR - + containerIdStr, LocalDirAllocator.SIZE_UNKNOWN, this.conf); + + containerIdStr, LocalDirAllocator.SIZE_UNKNOWN, this.conf, + false); for (String str : command) { // TODO: Should we instead work via symlinks without this grammar? newCmds.add(str.replace(ApplicationConstants.LOG_DIR_EXPANSION_VAR, @@ -147,7 +148,7 @@ public Integer call() { + Path.SEPARATOR + user + Path.SEPARATOR + ContainerLocalizer.APPCACHE + Path.SEPARATOR + appIdStr + Path.SEPARATOR + containerIdStr, - LocalDirAllocator.SIZE_UNKNOWN, this.conf); + LocalDirAllocator.SIZE_UNKNOWN, this.conf, false); try { // /////////// Write out the container-script in the nmPrivate space. String[] localDirs = diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java new file mode 100644 index 0000000000..b642279206 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java @@ -0,0 +1,227 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.nodemanager; + +import java.net.InetAddress; +import java.net.InetSocketAddress; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.ipc.TestRPC.TestImpl; +import org.apache.hadoop.ipc.TestRPC.TestProtocol; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger; +import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger.AuditConstants; +import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger.Keys; + +import org.apache.hadoop.net.NetUtils; + +import static org.mockito.Mockito.*; +import static junit.framework.Assert.*; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + + +/** + * Tests {@link NMAuditLogger}. 
+ */ +public class TestNMAuditLogger { + private static final Log LOG = LogFactory.getLog(TestNMAuditLogger.class); + private static final String USER = "test"; + private static final String OPERATION = "oper"; + private static final String TARGET = "tgt"; + private static final String DESC = "description of an audit log"; + + private static final ApplicationId APPID = mock(ApplicationId.class); + private static final ContainerId CONTAINERID = mock(ContainerId.class); + + @Before + public void setUp() throws Exception { + when(APPID.toString()).thenReturn("app_1"); + when(CONTAINERID.toString()).thenReturn("container_1"); + } + + + /** + * Test the AuditLog format with key-val pair. + */ + @Test + public void testKeyValLogFormat() throws Exception { + StringBuilder actLog = new StringBuilder(); + StringBuilder expLog = new StringBuilder(); + // add the first k=v pair and check + NMAuditLogger.start(Keys.USER, USER, actLog); + expLog.append("USER=test"); + assertEquals(expLog.toString(), actLog.toString()); + + // append another k1=v1 pair to already added k=v and test + NMAuditLogger.add(Keys.OPERATION, OPERATION, actLog); + expLog.append("\tOPERATION=oper"); + assertEquals(expLog.toString(), actLog.toString()); + + // append another k1=null pair and test + NMAuditLogger.add(Keys.APPID, (String)null, actLog); + expLog.append("\tAPPID=null"); + assertEquals(expLog.toString(), actLog.toString()); + + // now add the target and check of the final string + NMAuditLogger.add(Keys.TARGET, TARGET, actLog); + expLog.append("\tTARGET=tgt"); + assertEquals(expLog.toString(), actLog.toString()); + } + + + /** + * Test the AuditLog format for successful events. + */ + private void testSuccessLogFormatHelper(boolean checkIP, + ApplicationId appId, ContainerId containerId) { + // check without the IP + String sLog = NMAuditLogger.createSuccessLog(USER, OPERATION, TARGET, + appId, containerId); + StringBuilder expLog = new StringBuilder(); + expLog.append("USER=test\t"); + if (checkIP) { + InetAddress ip = Server.getRemoteIp(); + expLog.append(Keys.IP.name() + "=" + ip.getHostAddress() + "\t"); + } + expLog.append("OPERATION=oper\tTARGET=tgt\tRESULT=SUCCESS"); + if (appId != null) { + expLog.append("\tAPPID=app_1"); + } + if (containerId != null) { + expLog.append("\tCONTAINERID=container_1"); + } + assertEquals(expLog.toString(), sLog); + } + + /** + * Test the AuditLog format for successful events passing nulls. + */ + private void testSuccessLogNulls(boolean checkIP) { + String sLog = NMAuditLogger.createSuccessLog(null, null, null, + null, null); + StringBuilder expLog = new StringBuilder(); + expLog.append("USER=null\t"); + if (checkIP) { + InetAddress ip = Server.getRemoteIp(); + expLog.append(Keys.IP.name() + "=" + ip.getHostAddress() + "\t"); + } + expLog.append("OPERATION=null\tTARGET=null\tRESULT=SUCCESS"); + assertEquals(expLog.toString(), sLog); + } + + /** + * Test the AuditLog format for successful events with the various + * parameters. + */ + private void testSuccessLogFormat(boolean checkIP) { + testSuccessLogFormatHelper(checkIP, null, null); + testSuccessLogFormatHelper(checkIP, APPID, null); + testSuccessLogFormatHelper(checkIP, null, CONTAINERID); + testSuccessLogFormatHelper(checkIP, APPID, CONTAINERID); + testSuccessLogNulls(checkIP); + } + + + /** + * Test the AuditLog format for failure events. 
+ */ + private void testFailureLogFormatHelper(boolean checkIP, ApplicationId appId, + ContainerId containerId) { + String fLog = + NMAuditLogger.createFailureLog(USER, OPERATION, TARGET, DESC, appId, + containerId); + StringBuilder expLog = new StringBuilder(); + expLog.append("USER=test\t"); + if (checkIP) { + InetAddress ip = Server.getRemoteIp(); + expLog.append(Keys.IP.name() + "=" + ip.getHostAddress() + "\t"); + } + expLog.append("OPERATION=oper\tTARGET=tgt\tRESULT=FAILURE\t"); + expLog.append("DESCRIPTION=description of an audit log"); + + if (appId != null) { + expLog.append("\tAPPID=app_1"); + } + if (containerId != null) { + expLog.append("\tCONTAINERID=container_1"); + } + assertEquals(expLog.toString(), fLog); + } + + /** + * Test the AuditLog format for failure events with the various + * parameters. + */ + private void testFailureLogFormat(boolean checkIP) { + testFailureLogFormatHelper(checkIP, null, null); + testFailureLogFormatHelper(checkIP, APPID, null); + testFailureLogFormatHelper(checkIP, null, CONTAINERID); + testFailureLogFormatHelper(checkIP, APPID, CONTAINERID); + } + + /** + * Test {@link NMAuditLogger} without IP set. + */ + @Test + public void testNMAuditLoggerWithoutIP() throws Exception { + // test without ip + testSuccessLogFormat(false); + testFailureLogFormat(false); + } + + /** + * A special extension of {@link TestImpl} RPC server with + * {@link TestImpl#ping()} testing the audit logs. + */ + private class MyTestRPCServer extends TestImpl { + @Override + public void ping() { + // test with ip set + testSuccessLogFormat(true); + testFailureLogFormat(true); + } + } + + /** + * Test {@link NMAuditLogger} with IP set. + */ + @Test + public void testNMAuditLoggerWithIP() throws Exception { + Configuration conf = new Configuration(); + // start the IPC server + Server server = RPC.getServer(new MyTestRPCServer(), "0.0.0.0", 0, conf); + server.start(); + + InetSocketAddress addr = NetUtils.getConnectAddress(server); + + // Make a client connection and test the audit log + TestProtocol proxy = (TestProtocol)RPC.getProxy(TestProtocol.class, + TestProtocol.versionID, addr, conf); + // Start the testcase + proxy.ping(); + + server.stop(); + } +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java index e30374b4ae..2324708150 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java @@ -22,6 +22,7 @@ import static org.junit.Assert.*; import java.nio.ByteBuffer; +import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -44,10 +45,16 @@ static class LightService extends AbstractService private final int expected_appId; private int remaining_init; private int remaining_stop; + private ByteBuffer meta = null; + LightService(String name, char idef, int expected_appId) { + this(name, idef, expected_appId, null); + } + LightService(String name, char idef, int 
expected_appId, ByteBuffer meta) { super(name); this.idef = idef; this.expected_appId = expected_appId; + this.meta = meta; } @Override public void init(Configuration conf) { @@ -71,14 +78,18 @@ public void initApp(String user, ApplicationId appId, ByteBuffer data) { public void stopApp(ApplicationId appId) { assertEquals(expected_appId, appId.getId()); } + @Override + public ByteBuffer getMeta() { + return meta; + } } static class ServiceA extends LightService { - public ServiceA() { super("A", 'A', 65); } + public ServiceA() { super("A", 'A', 65, ByteBuffer.wrap("A".getBytes())); } } static class ServiceB extends LightService { - public ServiceB() { super("B", 'B', 66); } + public ServiceB() { super("B", 'B', 66, ByteBuffer.wrap("B".getBytes())); } } @Test @@ -139,6 +150,44 @@ public void testAuxServices() { } } + + @Test + public void testAuxServicesMeta() { + Configuration conf = new Configuration(); + conf.setStrings(AuxServices.AUX_SERVICES, new String[] { "Asrv", "Bsrv" }); + conf.setClass(String.format(AuxServices.AUX_SERVICE_CLASS_FMT, "Asrv"), + ServiceA.class, Service.class); + conf.setClass(String.format(AuxServices.AUX_SERVICE_CLASS_FMT, "Bsrv"), + ServiceB.class, Service.class); + final AuxServices aux = new AuxServices(); + aux.init(conf); + + int latch = 1; + for (Service s : aux.getServices()) { + assertEquals(INITED, s.getServiceState()); + if (s instanceof ServiceA) { latch *= 2; } + else if (s instanceof ServiceB) { latch *= 3; } + else fail("Unexpected service type " + s.getClass()); + } + assertEquals("Invalid mix of services", 6, latch); + aux.start(); + for (Service s : aux.getServices()) { + assertEquals(STARTED, s.getServiceState()); + } + + Map meta = aux.getMeta(); + assertEquals(2, meta.size()); + assertEquals("A", new String(meta.get("Asrv").array())); + assertEquals("B", new String(meta.get("Bsrv").array())); + + aux.stop(); + for (Service s : aux.getServices()) { + assertEquals(STOPPED, s.getServiceState()); + } + } + + + @Test public void testAuxUnexpectedStop() { Configuration conf = new Configuration(); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java index 061f4ee5ab..01508a2142 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java @@ -51,6 +51,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsRequest; import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsResponse; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants; import org.apache.hadoop.yarn.service.AbstractService; public class AdminService extends AbstractService implements RMAdminProtocol { @@ -113,40 +114,54 @@ public void stop() { super.stop(); } - private void checkAcls(String method) throws YarnRemoteException { + private UserGroupInformation checkAcls(String method) throws 
YarnRemoteException { + UserGroupInformation user; try { - UserGroupInformation user = UserGroupInformation.getCurrentUser(); - if (!adminAcl.isUserAllowed(user)) { - LOG.warn("User " + user.getShortUserName() + " doesn't have permission" + - " to call '" + method + "'"); - - throw RPCUtil.getRemoteException( - new AccessControlException("User " + user.getShortUserName() + - " doesn't have permission" + - " to call '" + method + "'") - ); - } - - LOG.info("RM Admin: " + method + " invoked by user " + - user.getShortUserName()); - + user = UserGroupInformation.getCurrentUser(); } catch (IOException ioe) { LOG.warn("Couldn't get current user", ioe); + + RMAuditLogger.logFailure("UNKNOWN", method, + adminAcl.toString(), "AdminService", + "Couldn't get current user"); throw RPCUtil.getRemoteException(ioe); } + + if (!adminAcl.isUserAllowed(user)) { + LOG.warn("User " + user.getShortUserName() + " doesn't have permission" + + " to call '" + method + "'"); + + RMAuditLogger.logFailure(user.getShortUserName(), method, + adminAcl.toString(), "AdminService", + AuditConstants.UNAUTHORIZED_USER); + + throw RPCUtil.getRemoteException( + new AccessControlException("User " + user.getShortUserName() + + " doesn't have permission" + + " to call '" + method + "'") + ); + } + LOG.info("RM Admin: " + method + " invoked by user " + + user.getShortUserName()); + + return user; } @Override public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request) throws YarnRemoteException { - checkAcls("refreshQueues"); - + UserGroupInformation user = checkAcls("refreshQueues"); try { scheduler.reinitialize(conf, null, null); // ContainerTokenSecretManager can't // be 'refreshed' + RMAuditLogger.logSuccess(user.getShortUserName(), "refreshQueues", + "AdminService"); return recordFactory.newRecordInstance(RefreshQueuesResponse.class); } catch (IOException ioe) { LOG.info("Exception refreshing queues ", ioe); + RMAuditLogger.logFailure(user.getShortUserName(), "refreshQueues", + adminAcl.toString(), "AdminService", + "Exception refreshing queues"); throw RPCUtil.getRemoteException(ioe); } } @@ -154,12 +169,17 @@ public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request) @Override public RefreshNodesResponse refreshNodes(RefreshNodesRequest request) throws YarnRemoteException { - checkAcls("refreshNodes"); + UserGroupInformation user = checkAcls("refreshNodes"); try { this.nodesListManager.refreshNodes(); + RMAuditLogger.logSuccess(user.getShortUserName(), "refreshNodes", + "AdminService"); return recordFactory.newRecordInstance(RefreshNodesResponse.class); } catch (IOException ioe) { LOG.info("Exception refreshing nodes ", ioe); + RMAuditLogger.logFailure(user.getShortUserName(), "refreshNodes", + adminAcl.toString(), "AdminService", + "Exception refreshing nodes"); throw RPCUtil.getRemoteException(ioe); } } @@ -168,9 +188,11 @@ public RefreshNodesResponse refreshNodes(RefreshNodesRequest request) public RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfiguration( RefreshSuperUserGroupsConfigurationRequest request) throws YarnRemoteException { - checkAcls("refreshSuperUserGroupsConfiguration"); + UserGroupInformation user = checkAcls("refreshSuperUserGroupsConfiguration"); ProxyUsers.refreshSuperUserGroupsConfiguration(new Configuration()); + RMAuditLogger.logSuccess(user.getShortUserName(), + "refreshSuperUserGroupsConfiguration", "AdminService"); return recordFactory.newRecordInstance( RefreshSuperUserGroupsConfigurationResponse.class); @@ -179,9 +201,11 @@ public 
RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfigu @Override public RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings( RefreshUserToGroupsMappingsRequest request) throws YarnRemoteException { - checkAcls("refreshUserToGroupsMappings"); + UserGroupInformation user = checkAcls("refreshUserToGroupsMappings"); Groups.getUserToGroupsMappingService().refresh(); + RMAuditLogger.logSuccess(user.getShortUserName(), + "refreshUserToGroupsMappings", "AdminService"); return recordFactory.newRecordInstance( RefreshUserToGroupsMappingsResponse.class); @@ -190,12 +214,14 @@ public RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings( @Override public RefreshAdminAclsResponse refreshAdminAcls( RefreshAdminAclsRequest request) throws YarnRemoteException { - checkAcls("refreshAdminAcls"); + UserGroupInformation user = checkAcls("refreshAdminAcls"); Configuration conf = new Configuration(); adminAcl = new AccessControlList( conf.get(RMConfig.RM_ADMIN_ACL, RMConfig.DEFAULT_RM_ADMIN_ACL)); + RMAuditLogger.logSuccess(user.getShortUserName(), "refreshAdminAcls", + "AdminService"); return recordFactory.newRecordInstance(RefreshAdminAclsResponse.class); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java index fe0c8b2300..5f6f7d8b4c 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java @@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; import org.apache.hadoop.yarn.api.records.AMResponse; +import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ResourceRequest; @@ -49,6 +50,7 @@ import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager; import org.apache.hadoop.yarn.security.SchedulerSecurityInfo; +import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; @@ -115,11 +117,16 @@ public RegisterApplicationMasterResponse registerApplicationMaster( ApplicationAttemptId applicationAttemptId = request .getApplicationAttemptId(); + ApplicationId appID = applicationAttemptId.getApplicationId(); AMResponse lastResponse = responseMap.get(applicationAttemptId); if (lastResponse == null) { String message = "Application doesn't exist in cache " + applicationAttemptId; LOG.error(message); + RMAuditLogger.logFailure(this.rmContext.getRMApps().get(appID).getUser(), + AuditConstants.REGISTER_AM, message, "ApplicationMasterService", + "Error in 
registering application master", appID, + applicationAttemptId); throw RPCUtil.getRemoteException(message); } @@ -133,6 +140,10 @@ public RegisterApplicationMasterResponse registerApplicationMaster( new RMAppAttemptRegistrationEvent(applicationAttemptId, request .getHost(), request.getRpcPort(), request.getTrackingUrl())); + RMAuditLogger.logSuccess(this.rmContext.getRMApps().get(appID).getUser(), + AuditConstants.REGISTER_AM, "ApplicationMasterService", appID, + applicationAttemptId); + // Pick up min/max resource from scheduler... RegisterApplicationMasterResponse response = recordFactory .newRecordInstance(RegisterApplicationMasterResponse.class); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java index 83878c0cd7..dc6f6a796a 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java @@ -70,6 +70,7 @@ import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.security.client.ClientRMSecurityInfo; +import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType; @@ -195,9 +196,11 @@ public SubmitApplicationResponse submitApplication( SubmitApplicationRequest request) throws YarnRemoteException { ApplicationSubmissionContext submissionContext = request .getApplicationSubmissionContext(); + ApplicationId applicationId = null; + String user = null; try { - String user = UserGroupInformation.getCurrentUser().getShortUserName(); - ApplicationId applicationId = submissionContext.getApplicationId(); + user = UserGroupInformation.getCurrentUser().getShortUserName(); + applicationId = submissionContext.getApplicationId(); if (rmContext.getRMApps().get(applicationId) != null) { throw new IOException("Application with id " + applicationId + " is already present! 
Cannot add a duplicate!"); @@ -207,8 +210,13 @@ public SubmitApplicationResponse submitApplication( LOG.info("Application with id " + applicationId.getId() + " submitted by user " + user + " with " + submissionContext); + RMAuditLogger.logSuccess(user, AuditConstants.SUBMIT_APP_REQUEST, + "ClientRMService", applicationId); } catch (IOException ie) { LOG.info("Exception in submitting application", ie); + RMAuditLogger.logFailure(user, AuditConstants.SUBMIT_APP_REQUEST, + ie.getMessage(), "ClientRMService", + "Exception in submitting application", applicationId); throw RPCUtil.getRemoteException(ie); } @@ -228,6 +236,9 @@ public FinishApplicationResponse finishApplication( callerUGI = UserGroupInformation.getCurrentUser(); } catch (IOException ie) { LOG.info("Error getting UGI ", ie); + RMAuditLogger.logFailure("UNKNOWN", AuditConstants.KILL_APP_REQUEST, + "UNKNOWN", "ClientRMService" , "Error getting UGI", + applicationId); throw RPCUtil.getRemoteException(ie); } @@ -235,6 +246,10 @@ public FinishApplicationResponse finishApplication( // TODO: What if null if (!checkAccess(callerUGI, application.getUser(), ApplicationACL.MODIFY_APP)) { + RMAuditLogger.logFailure(callerUGI.getShortUserName(), + AuditConstants.KILL_APP_REQUEST, + "User doesn't have MODIFY_APP permissions", "ClientRMService", + AuditConstants.UNAUTHORIZED_USER, applicationId); throw RPCUtil.getRemoteException(new AccessControlException("User " + callerUGI.getShortUserName() + " cannot perform operation " + ApplicationACL.MODIFY_APP.name() + " on " + applicationId)); @@ -243,6 +258,8 @@ public FinishApplicationResponse finishApplication( this.rmContext.getDispatcher().getEventHandler().handle( new RMAppEvent(applicationId, RMAppEventType.KILL)); + RMAuditLogger.logSuccess(callerUGI.getShortUserName(), + AuditConstants.KILL_APP_REQUEST, "ClientRMService" , applicationId); FinishApplicationResponse response = recordFactory .newRecordInstance(FinishApplicationResponse.class); return response; diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java index 3219d8220d..0324908f2a 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java @@ -32,13 +32,18 @@ import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier; import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager; import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager; +import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger; +import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants; import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl; 
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRejectedEvent; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; +import org.apache.hadoop.util.StringUtils; /** * This class manages the list of applications for the resource manager. @@ -69,6 +74,86 @@ public RMAppManager(RMContext context, ClientToAMSecretManager RMConfig.DEFAULT_EXPIRE_APPLICATIONS_COMPLETED_MAX)); } + /** + * This class is for logging the application summary. + */ + static class ApplicationSummary { + static final Log LOG = LogFactory.getLog(ApplicationSummary.class); + + // Escape sequences + static final char EQUALS = '='; + static final char[] charsToEscape = + {StringUtils.COMMA, EQUALS, StringUtils.ESCAPE_CHAR}; + + static class SummaryBuilder { + final StringBuilder buffer = new StringBuilder(); + + // A little optimization for a very common case + SummaryBuilder add(String key, long value) { + return _add(key, Long.toString(value)); + } + + SummaryBuilder add(String key, T value) { + return _add(key, StringUtils.escapeString(String.valueOf(value), + StringUtils.ESCAPE_CHAR, charsToEscape)); + } + + SummaryBuilder add(SummaryBuilder summary) { + if (buffer.length() > 0) buffer.append(StringUtils.COMMA); + buffer.append(summary.buffer); + return this; + } + + SummaryBuilder _add(String key, String value) { + if (buffer.length() > 0) buffer.append(StringUtils.COMMA); + buffer.append(key).append(EQUALS).append(value); + return this; + } + + @Override public String toString() { + return buffer.toString(); + } + } + + /** + * create a summary of the application's runtime. + * + * @param app {@link RMApp} whose summary is to be created, cannot + * be null. + */ + public static SummaryBuilder createAppSummary(RMApp app) { + String trackingUrl = "N/A"; + String host = "N/A"; + RMAppAttempt attempt = app.getCurrentAppAttempt(); + if (attempt != null) { + trackingUrl = attempt.getTrackingUrl(); + host = attempt.getHost(); + } + SummaryBuilder summary = new SummaryBuilder() + .add("appId", app.getApplicationId()) + .add("name", app.getName()) + .add("user", app.getUser()) + .add("queue", app.getQueue()) + .add("state", app.getState()) + .add("trackingUrl", trackingUrl) + .add("appMasterHost", host) + .add("startTime", app.getStartTime()) + .add("finishTime", app.getFinishTime()); + return summary; + } + + /** + * Log a summary of the application's runtime. 
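The SummaryBuilder above joins key=value pairs with commas, so every value is first passed through StringUtils.escapeString to protect embedded ',' and '=' characters. A minimal, standalone sketch of that escaping (not part of the patch; it only assumes Hadoop's StringUtils on the classpath):

    import org.apache.hadoop.util.StringUtils;

    // Demonstrates the escaping ApplicationSummary.SummaryBuilder applies to values,
    // so a ',' or '=' inside an application name cannot break the summary record.
    public class SummaryEscapeSketch {
      public static void main(String[] args) {
        char[] charsToEscape = {StringUtils.COMMA, '=', StringUtils.ESCAPE_CHAR};
        String appName = "grep a=b,c";            // illustrative value
        String escaped = StringUtils.escapeString(appName,
            StringUtils.ESCAPE_CHAR, charsToEscape);
        System.out.println("name=" + escaped);    // prints: name=grep a\=b\,c
      }
    }

For a finished application the logged summary then reads as a single comma-separated line, for example appId=application_1316000000000_0007,name=word count,user=alice,queue=default,state=FINISHED,... (values invented here).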
+ * + * @param app {@link RMApp} whose summary is to be logged + */ + public static void logAppSummary(RMApp app) { + if (app != null) { + LOG.info(createAppSummary(app)); + } + } + } + protected void setCompletedAppsMax(int max) { this.completedAppsMax = max; } @@ -82,8 +167,39 @@ protected synchronized void addCompletedApp(ApplicationId appId) { LOG.error("RMAppManager received completed appId of null, skipping"); } else { completedApps.add(appId); + writeAuditLog(appId); } - }; + } + + protected void writeAuditLog(ApplicationId appId) { + RMApp app = rmContext.getRMApps().get(appId); + String operation = "UNKONWN"; + boolean success = false; + switch (app.getState()) { + case FAILED: + operation = AuditConstants.FINISH_FAILED_APP; + break; + case FINISHED: + operation = AuditConstants.FINISH_SUCCESS_APP; + success = true; + break; + case KILLED: + operation = AuditConstants.FINISH_KILLED_APP; + success = true; + break; + default: + } + + if (success) { + RMAuditLogger.logSuccess(app.getUser(), operation, + "RMAppManager", app.getApplicationId()); + } else { + StringBuilder diag = app.getDiagnostics(); + String msg = diag == null ? null : diag.toString(); + RMAuditLogger.logFailure(app.getUser(), operation, msg, "RMAppManager", + "App failed with state: " + app.getState(), appId); + } + } /* * check to see if hit the limit for max # completed apps kept @@ -154,6 +270,7 @@ public void handle(RMAppManagerEvent event) { case APP_COMPLETED: { addCompletedApp(appID); + ApplicationSummary.logAppSummary(rmContext.getRMApps().get(appID)); checkAppNumCompletedLimit(); } break; diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java new file mode 100644 index 0000000000..b9261cac20 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java @@ -0,0 +1,309 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.resourcemanager; + +import java.net.InetAddress; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; + +/** + * Manages ResourceManager audit logs. + * + * Audit log format is written as key=value pairs. Tab separated. 
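As a concrete illustration of that format (all values below are invented), a successful application submission recorded through the logSuccess helpers defined further down comes out as one tab-separated line such as:

    USER=alice   IP=10.1.2.3   OPERATION=Submit Application Request   TARGET=ClientRMService   RESULT=SUCCESS   APPID=application_1316000000000_0001

The IP field is only present when the call happens inside an RPC handler; addRemoteIP() below skips it otherwise, which is what the unit tests at the end of this patch rely on.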
+ */ +public class RMAuditLogger { + private static final Log LOG = LogFactory.getLog(RMAuditLogger.class); + + static enum Keys {USER, OPERATION, TARGET, RESULT, IP, PERMISSIONS, + DESCRIPTION, APPID, APPATTEMPTID, CONTAINERID} + + public static class AuditConstants { + static final String SUCCESS = "SUCCESS"; + static final String FAILURE = "FAILURE"; + static final String KEY_VAL_SEPARATOR = "="; + static final char PAIR_SEPARATOR = '\t'; + + public static final String KILL_APP_REQUEST = "Kill Application Request"; + public static final String SUBMIT_APP_REQUEST = "Submit Application Request"; + public static final String FINISH_SUCCESS_APP = "Application Finished - Succeeded"; + public static final String FINISH_FAILED_APP = "Application Finished - Failed"; + public static final String FINISH_KILLED_APP = "Application Finished - Killed"; + public static final String REGISTER_AM = "Register App Master"; + public static final String ALLOC_CONTAINER = "AM Allocated Container"; + public static final String RELEASE_CONTAINER = "AM Released Container"; + + // Some commonly used descriptions + public static final String UNAUTHORIZED_USER = "Unauthorized user"; + } + + /** + * A helper api for creating an audit log for a successful event. + */ + static String createSuccessLog(String user, String operation, String target, + ApplicationId appId, ApplicationAttemptId attemptId, ContainerId containerId) { + StringBuilder b = new StringBuilder(); + start(Keys.USER, user, b); + addRemoteIP(b); + add(Keys.OPERATION, operation, b); + add(Keys.TARGET, target ,b); + add(Keys.RESULT, AuditConstants.SUCCESS, b); + if (appId != null) { + add(Keys.APPID, appId.toString(), b); + } + if (attemptId != null) { + add(Keys.APPATTEMPTID, attemptId.toString(), b); + } + if (containerId != null) { + add(Keys.CONTAINERID, containerId.toString(), b); + } + return b.toString(); + } + + /** + * Create a readable and parseable audit log string for a successful event. + * + * @param user User who made the service request to the ResourceManager + * @param operation Operation requested by the user. + * @param target The target on which the operation is being performed. + * @param appId Application Id in which operation was performed. + * @param containerId Container Id in which operation was performed. + * + *

    + * Note that the {@link RMAuditLogger} uses tabs ('\t') as a key-val delimiter + * and hence the value fields should not contain tabs ('\t'). + */ + public static void logSuccess(String user, String operation, String target, + ApplicationId appId, ContainerId containerId) { + if (LOG.isInfoEnabled()) { + LOG.info(createSuccessLog(user, operation, target, appId, null, + containerId)); + } + } + + /** + * Create a readable and parseable audit log string for a successful event. + * + * @param user User who made the service request to the ResourceManager. + * @param operation Operation requested by the user. + * @param target The target on which the operation is being performed. + * @param appId Application Id in which operation was performed. + * @param attemptId Application Attempt Id in which operation was performed. + * + *

    + * Note that the {@link RMAuditLogger} uses tabs ('\t') as a key-val delimiter + * and hence the value fields should not contain tabs ('\t'). + */ + public static void logSuccess(String user, String operation, String target, + ApplicationId appId, ApplicationAttemptId attemptId) { + if (LOG.isInfoEnabled()) { + LOG.info(createSuccessLog(user, operation, target, appId, attemptId, + null)); + } + } + + + /** + * Create a readable and parseable audit log string for a successful event. + * + * @param user User who made the service request to the ResourceManager. + * @param operation Operation requested by the user. + * @param target The target on which the operation is being performed. + * @param appId Application Id in which operation was performed. + * + *

    + * Note that the {@link RMAuditLogger} uses tabs ('\t') as a key-val delimiter + * and hence the value fields should not contain tabs ('\t'). + */ + public static void logSuccess(String user, String operation, String target, + ApplicationId appId) { + if (LOG.isInfoEnabled()) { + LOG.info(createSuccessLog(user, operation, target, appId, null, null)); + } + } + + /** + * Create a readable and parseable audit log string for a successful event. + * + * @param user User who made the service request. + * @param operation Operation requested by the user. + * @param target The target on which the operation is being performed. + * + *

    + * Note that the {@link RMAuditLogger} uses tabs ('\t') as a key-val delimiter + * and hence the value fields should not contains tabs ('\t'). + */ + public static void logSuccess(String user, String operation, String target) { + if (LOG.isInfoEnabled()) { + LOG.info(createSuccessLog(user, operation, target, null, null, null)); + } + } + + /** + * A helper api for creating an audit log for a failure event. + */ + static String createFailureLog(String user, String operation, String perm, + String target, String description, ApplicationId appId, + ApplicationAttemptId attemptId, ContainerId containerId) { + StringBuilder b = new StringBuilder(); + start(Keys.USER, user, b); + addRemoteIP(b); + add(Keys.OPERATION, operation, b); + add(Keys.TARGET, target ,b); + add(Keys.RESULT, AuditConstants.FAILURE, b); + add(Keys.DESCRIPTION, description, b); + add(Keys.PERMISSIONS, perm, b); + if (appId != null) { + add(Keys.APPID, appId.toString(), b); + } + if (attemptId != null) { + add(Keys.APPATTEMPTID, attemptId.toString(), b); + } + if (containerId != null) { + add(Keys.CONTAINERID, containerId.toString(), b); + } + return b.toString(); + } + + /** + * Create a readable and parseable audit log string for a failed event. + * + * @param user User who made the service request. + * @param operation Operation requested by the user. + * @param perm Target permissions. + * @param target The target on which the operation is being performed. + * @param description Some additional information as to why the operation + * failed. + * @param appId Application Id in which operation was performed. + * @param containerId Container Id in which operation was performed. + * + *

    + * Note that the {@link RMAuditLogger} uses tabs ('\t') as a key-val delimiter + * and hence the value fields should not contain tabs ('\t'). + */ + public static void logFailure(String user, String operation, String perm, + String target, String description, ApplicationId appId, + ContainerId containerId) { + if (LOG.isWarnEnabled()) { + LOG.warn(createFailureLog(user, operation, perm, target, description, + appId, null, containerId)); + } + } + + /** + * Create a readable and parseable audit log string for a failed event. + * + * @param user User who made the service request. + * @param operation Operation requested by the user. + * @param perm Target permissions. + * @param target The target on which the operation is being performed. + * @param description Some additional information as to why the operation + * failed. + * @param appId ApplicationId in which operation was performed. + * + *

    + * Note that the {@link RMAuditLogger} uses tabs ('\t') as a key-val delimiter + * and hence the value fields should not contain tabs ('\t'). + */ + public static void logFailure(String user, String operation, String perm, + String target, String description, ApplicationId appId, + ApplicationAttemptId attemptId) { + if (LOG.isWarnEnabled()) { + LOG.warn(createFailureLog(user, operation, perm, target, description, + appId, attemptId, null)); + } + } + + + /** + * Create a readable and parseable audit log string for a failed event. + * + * @param user User who made the service request. + * @param operation Operation requested by the user. + * @param perm Target permissions. + * @param target The target on which the operation is being performed. + * @param description Some additional information as to why the operation + * failed. + * @param appId ApplicationId in which operation was performed. + * + *

    + * Note that the {@link RMAuditLogger} uses tabs ('\t') as a key-val delimiter + * and hence the value fields should not contain tabs ('\t'). + */ + public static void logFailure(String user, String operation, String perm, + String target, String description, ApplicationId appId) { + if (LOG.isWarnEnabled()) { + LOG.warn(createFailureLog(user, operation, perm, target, description, + appId, null, null)); + } + } + + /** + * Create a readable and parseable audit log string for a failed event. + * + * @param user User who made the service request. + * @param operation Operation requested by the user. + * @param perm Target permissions. + * @param target The target on which the operation is being performed. + * @param description Some additional information as to why the operation + * failed. + * + *

    + * Note that the {@link RMAuditLogger} uses tabs ('\t') as a key-val delimiter + * and hence the value fields should not contains tabs ('\t'). + */ + public static void logFailure(String user, String operation, String perm, + String target, String description) { + if (LOG.isWarnEnabled()) { + LOG.warn(createFailureLog(user, operation, perm, target, description, + null, null, null)); + } + } + + /** + * A helper api to add remote IP address + */ + static void addRemoteIP(StringBuilder b) { + InetAddress ip = Server.getRemoteIp(); + // ip address can be null for testcases + if (ip != null) { + add(Keys.IP, ip.getHostAddress(), b); + } + } + + /** + * Adds the first key-val pair to the passed builder in the following format + * key=value + */ + static void start(Keys key, String value, StringBuilder b) { + b.append(key.name()).append(AuditConstants.KEY_VAL_SEPARATOR).append(value); + } + + /** + * Appends the key-val pair to the passed builder in the following format + * key=value + */ + static void add(Keys key, String value, StringBuilder b) { + b.append(AuditConstants.PAIR_SEPARATOR).append(key.name()) + .append(AuditConstants.KEY_VAL_SEPARATOR).append(value); + } +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 553b98b52a..fd42e5f1ff 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -31,6 +31,7 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -460,7 +461,7 @@ public ClientRMService getClientRMService() { /** * return the scheduler. - * @return + * @return the scheduler for the Resource Manager. */ @Private public ResourceScheduler getResourceScheduler() { @@ -469,7 +470,7 @@ public ResourceScheduler getResourceScheduler() { /** * return the resource tracking component. - * @return + * @return the resource tracking component. 
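Before moving on: the ClientRMService and ApplicationMasterService hunks earlier in the patch use the RMAuditLogger helpers just defined in the same way, writing a success record once the event has been dispatched and a failure record on the error path before rethrowing. A condensed sketch of that calling pattern (the class and doSubmit() are placeholders, not RM code):

    import java.io.IOException;

    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger;
    import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants;

    // Sketch of the audit-logging pattern used by the RM services in this patch.
    public class AuditedSubmitSketch {
      void submit(String user, ApplicationId appId) throws IOException {
        try {
          doSubmit(appId);                         // stands in for the real work
          RMAuditLogger.logSuccess(user, AuditConstants.SUBMIT_APP_REQUEST,
              "ClientRMService", appId);
        } catch (IOException ie) {
          RMAuditLogger.logFailure(user, AuditConstants.SUBMIT_APP_REQUEST,
              ie.getMessage(), "ClientRMService",
              "Exception in submitting application", appId);
          throw ie;
        }
      }

      private void doSubmit(ApplicationId appId) throws IOException {
        // no-op stand-in for the submission logic
      }
    }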
*/ @Private public ResourceTrackerService getResourceTrackerService() { @@ -488,6 +489,7 @@ public void recover(RMState state) throws Exception { } public static void main(String argv[]) { + StringUtils.startupShutdownMessage(ResourceManager.class, argv, LOG); ResourceManager resourceManager = null; try { Configuration conf = new YarnConfiguration(); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java index babad10128..0d8e563f28 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java @@ -29,7 +29,7 @@ /** * This interface is the one implemented by the schedulers. It mainly extends - * {@link ResourceListener} and {@link YarnScheduler}. + * {@link YarnScheduler}. * */ @LimitedPrivate("yarn") diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java index e544e4a66e..13101fc95e 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java @@ -39,6 +39,8 @@ import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger; +import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants; import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore; import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; @@ -73,7 +75,11 @@ public class SchedulerApp { final Map> reservedContainers = new HashMap>(); - Map schedulingOpportunities = new HashMap(); + Map schedulingOpportunities = + new HashMap(); + + Map reReservations = + new HashMap(); Resource currentReservation = recordFactory .newRecordInstance(Resource.class); @@ -178,6 +184,10 @@ synchronized public void containerCompleted(RMContainer rmContainer, // Remove from the list of containers liveContainers.remove(rmContainer.getContainerId()); + + RMAuditLogger.logSuccess(getUser(), + AuditConstants.RELEASE_CONTAINER, "SchedulerApp", + getApplicationId(), containerId); // Update usage metrics Resource containerResource = rmContainer.getContainer().getResource(); @@ -213,6 +223,9 @@ synchronized public RMContainer allocate(NodeType type, 
SchedulerNode node, + " container=" + container.getId() + " host=" + container.getNodeId().getHost() + " type=" + type); } + RMAuditLogger.logSuccess(getUser(), + AuditConstants.ALLOC_CONTAINER, "SchedulerApp", + getApplicationId(), container.getId()); // Add it to allContainers list. newlyAllocatedContainers.add(rmContainer); @@ -265,15 +278,15 @@ public synchronized RMContainer getRMContainer(ContainerId id) { } synchronized public void resetSchedulingOpportunities(Priority priority) { - Integer schedulingOpportunities = this.schedulingOpportunities - .get(priority); + Integer schedulingOpportunities = + this.schedulingOpportunities.get(priority); schedulingOpportunities = 0; this.schedulingOpportunities.put(priority, schedulingOpportunities); } synchronized public void addSchedulingOpportunity(Priority priority) { - Integer schedulingOpportunities = this.schedulingOpportunities - .get(priority); + Integer schedulingOpportunities = + this.schedulingOpportunities.get(priority); if (schedulingOpportunities == null) { schedulingOpportunities = 0; } @@ -282,8 +295,8 @@ synchronized public void addSchedulingOpportunity(Priority priority) { } synchronized public int getSchedulingOpportunities(Priority priority) { - Integer schedulingOpportunities = this.schedulingOpportunities - .get(priority); + Integer schedulingOpportunities = + this.schedulingOpportunities.get(priority); if (schedulingOpportunities == null) { schedulingOpportunities = 0; this.schedulingOpportunities.put(priority, schedulingOpportunities); @@ -291,6 +304,30 @@ synchronized public int getSchedulingOpportunities(Priority priority) { return schedulingOpportunities; } + synchronized void resetReReservations(Priority priority) { + Integer reReservations = this.reReservations.get(priority); + reReservations = 0; + this.reReservations.put(priority, reReservations); + } + + synchronized void addReReservation(Priority priority) { + Integer reReservations = this.reReservations.get(priority); + if (reReservations == null) { + reReservations = 0; + } + ++reReservations; + this.reReservations.put(priority, reReservations); + } + + synchronized public int getReReservations(Priority priority) { + Integer reReservations = this.reReservations.get(priority); + if (reReservations == null) { + reReservations = 0; + this.reReservations.put(priority, reReservations); + } + return reReservations; + } + public synchronized int getNumReservedContainers(Priority priority) { Map reservedContainers = this.reservedContainers.get(priority); @@ -318,6 +355,12 @@ public synchronized RMContainer reserve(SchedulerNode node, Priority priority, rmContext.getContainerAllocationExpirer()); Resources.addTo(currentReservation, container.getResource()); + + // Reset the re-reservation count + resetReReservations(priority); + } else { + // Note down the re-reservation + addReReservation(priority); } rmContainer.handle(new RMContainerReservedEvent(container.getId(), container.getResource(), node.getNodeID(), priority)); @@ -347,6 +390,9 @@ public synchronized void unreserve(SchedulerNode node, Priority priority) { this.reservedContainers.remove(priority); } + // Reset the re-reservation count + resetReReservations(priority); + Resource resource = reservedContainer.getContainer().getResource(); Resources.subtractFrom(currentReservation, resource); @@ -360,7 +406,7 @@ public synchronized void unreserve(SchedulerNode node, Priority priority) { * given priority? 
* @param node node to be checked * @param priority priority of reserved container - * @return + * @return true is reserved, false if not */ public synchronized boolean isReserved(SchedulerNode node, Priority priority) { Map reservedContainers = diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java index 92a98c1aef..a666478035 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java @@ -90,7 +90,7 @@ public String getRackName() { * given application. * * @param applicationId application - * @param containers allocated containers + * @param rmContainer allocated container */ public synchronized void allocateContainer(ApplicationId applicationId, RMContainer rmContainer) { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 77172e80bb..4fff1ff1a4 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -46,6 +46,8 @@ import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger; +import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store.RMState; import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources; @@ -166,6 +168,11 @@ public RMContext getRMContext() { return this.rmContext; } + @Override + public Resource getClusterResources() { + return clusterResource; + } + @Override public synchronized void reinitialize(Configuration conf, ContainerTokenSecretManager containerTokenSecretManager, RMContext rmContext) @@ -348,6 +355,8 @@ synchronized Queue getQueue(String queueName) { try { queue.submitApplication(SchedulerApp, user, queueName); } catch (AccessControlException ace) { + LOG.info("Failed to submit application " + applicationAttemptId + + " to queue " + queueName + " from user " + user, ace); this.rmContext.getDispatcher().getEventHandler().handle( new RMAppAttemptRejectedEvent(applicationAttemptId, ace.toString())); @@ -428,8 +437,15 @@ public Allocation allocate(ApplicationAttemptId applicationAttemptId, // 
Release containers for (ContainerId releasedContainerId : release) { - completedContainer(getRMContainer(releasedContainerId), - RMContainerEventType.RELEASED); + RMContainer rmContainer = getRMContainer(releasedContainerId); + if (rmContainer == null) { + RMAuditLogger.logFailure(application.getUser(), + AuditConstants.RELEASE_CONTAINER, + "Unauthorized access or invalid container", "CapacityScheduler", + "Trying to release container not owned by app or with invalid id", + application.getApplicationId(), releasedContainerId); + } + completedContainer(rmContainer, RMContainerEventType.RELEASED); } synchronized (application) { @@ -621,6 +637,7 @@ public void handle(SchedulerEvent event) { private synchronized void addNode(RMNode nodeManager) { this.nodes.put(nodeManager.getNodeID(), new SchedulerNode(nodeManager)); Resources.addTo(clusterResource, nodeManager.getTotalCapability()); + root.updateClusterResource(clusterResource); ++numNodeManagers; LOG.info("Added node " + nodeManager.getNodeAddress() + " clusterResource: " + clusterResource); @@ -629,6 +646,7 @@ private synchronized void addNode(RMNode nodeManager) { private synchronized void removeNode(RMNode nodeInfo) { SchedulerNode node = this.nodes.get(nodeInfo.getNodeID()); Resources.subtractFrom(clusterResource, nodeInfo.getTotalCapability()); + root.updateClusterResource(clusterResource); --numNodeManagers; // Remove running containers diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index 714a472678..345381651c 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -49,6 +49,10 @@ public class CapacitySchedulerConfiguration extends Configuration { public static final String MAXIMUM_SYSTEM_APPLICATIONS = PREFIX + "maximum-applications"; + @Private + public static final String MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT = + PREFIX + "maximum-am-resource-percent"; + @Private public static final String QUEUES = "queues"; @@ -82,6 +86,10 @@ public class CapacitySchedulerConfiguration extends Configuration { @Private public static final int DEFAULT_MAXIMUM_SYSTEM_APPLICATIIONS = 10000; + @Private + public static final float + DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT = 0.1f; + @Private public static final int UNDEFINED = -1; @@ -124,6 +132,11 @@ public int getMaximumSystemApplications() { return maxApplications; } + public float getMaximumApplicationMasterResourcePercent() { + return getFloat(MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT, + DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT); + } + public int getCapacity(String queue) { int capacity = getInt(getQueuePrefix(queue) + CAPACITY, UNDEFINED); if (capacity < MINIMUM_CAPACITY_VALUE || capacity > MAXIMUM_CAPACITY_VALUE) { diff --git 
a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerContext.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerContext.java index d48557a329..5f06bf6644 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerContext.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerContext.java @@ -19,7 +19,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; import org.apache.hadoop.yarn.api.records.Resource; -import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; @@ -38,4 +37,6 @@ public interface CapacitySchedulerContext { int getNumClusterNodes(); RMContext getRMContext(); + + Resource getClusterResources(); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index 6f98c268ff..3d3ac1265e 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -24,6 +24,7 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; @@ -77,16 +78,25 @@ public class LeafQueue implements Queue { private int maxApplications; private int maxApplicationsPerUser; + + private float maxAMResourcePercent; + private int maxActiveApplications; + private int maxActiveApplicationsPerUser; + private Resource usedResources = Resources.createResource(0); private float utilization = 0.0f; private float usedCapacity = 0.0f; private volatile int numContainers; - Set applications; + Set activeApplications; Map applicationsMap = new HashMap(); - public final Resource minimumAllocation; + Set pendingApplications; + + private final Resource minimumAllocation; + private final Resource maximumAllocation; + private final float minimumAllocationFactor; private ContainerTokenSecretManager containerTokenSecretManager; @@ -106,6 +116,8 @@ public class LeafQueue implements Queue { private CapacitySchedulerContext scheduler; + final static int DEFAULT_AM_RESOURCE = 2 * 1024; + public LeafQueue(CapacitySchedulerContext cs, String queueName, Queue parent, Comparator applicationComparator, Queue old) { @@ -118,6 +130,10 @@ public LeafQueue(CapacitySchedulerContext cs, cs.getConfiguration().getEnableUserMetrics()); this.minimumAllocation = 
cs.getMinimumResourceCapability(); + this.maximumAllocation = cs.getMaximumResourceCapability(); + this.minimumAllocationFactor = + (float)(maximumAllocation.getMemory() - minimumAllocation.getMemory()) / + maximumAllocation.getMemory(); this.containerTokenSecretManager = cs.getContainerTokenSecretManager(); float capacity = @@ -138,6 +154,15 @@ public LeafQueue(CapacitySchedulerContext cs, int maxApplicationsPerUser = (int)(maxApplications * (userLimit / 100.0f) * userLimitFactor); + this.maxAMResourcePercent = + cs.getConfiguration().getMaximumApplicationMasterResourcePercent(); + int maxActiveApplications = + computeMaxActiveApplications(cs.getClusterResources(), + maxAMResourcePercent, absoluteCapacity); + int maxActiveApplicationsPerUser = + computeMaxActiveApplicationsPerUser(maxActiveApplications, userLimit, + userLimitFactor); + this.queueInfo = recordFactory.newRecordInstance(QueueInfo.class); this.queueInfo.setQueueName(queueName); this.queueInfo.setChildQueues(new ArrayList()); @@ -151,20 +176,38 @@ public LeafQueue(CapacitySchedulerContext cs, maximumCapacity, absoluteMaxCapacity, userLimit, userLimitFactor, maxApplications, maxApplicationsPerUser, + maxActiveApplications, maxActiveApplicationsPerUser, state, acls); LOG.info("DEBUG --- LeafQueue:" + " name=" + queueName + ", fullname=" + getQueuePath()); - this.applications = new TreeSet(applicationComparator); + this.pendingApplications = + new TreeSet(applicationComparator); + this.activeApplications = new TreeSet(applicationComparator); } + private int computeMaxActiveApplications(Resource clusterResource, + float maxAMResourcePercent, float absoluteCapacity) { + return + Math.max( + (int)((clusterResource.getMemory() / DEFAULT_AM_RESOURCE) * + maxAMResourcePercent * absoluteCapacity), + 1); + } + + private int computeMaxActiveApplicationsPerUser(int maxActiveApplications, + int userLimit, float userLimitFactor) { + return (int)(maxActiveApplications * (userLimit / 100.0f) * userLimitFactor); + } + private synchronized void setupQueueConfigs( float capacity, float absoluteCapacity, float maxCapacity, float absoluteMaxCapacity, int userLimit, float userLimitFactor, int maxApplications, int maxApplicationsPerUser, + int maxActiveApplications, int maxActiveApplicationsPerUser, QueueState state, Map acls) { this.capacity = capacity; @@ -179,6 +222,9 @@ private synchronized void setupQueueConfigs( this.maxApplications = maxApplications; this.maxApplicationsPerUser = maxApplicationsPerUser; + this.maxActiveApplications = maxActiveApplications; + this.maxActiveApplicationsPerUser = maxActiveApplicationsPerUser; + this.state = state; this.acls = acls; @@ -239,6 +285,46 @@ public String getQueuePath() { return parent.getQueuePath() + "." + getQueueName(); } + /** + * Used only by tests. + */ + @Private + public Resource getMinimumAllocation() { + return minimumAllocation; + } + + /** + * Used only by tests. + */ + @Private + public Resource getMaximumAllocation() { + return maximumAllocation; + } + + /** + * Used only by tests. 
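To make the limit arithmetic above concrete: computeMaxActiveApplications() divides cluster memory by the assumed 2 GB ApplicationMaster size (DEFAULT_AM_RESOURCE), scales it by the configured AM-resource percent and the queue's absolute capacity (a 0-1 fraction), and never returns less than 1; the per-user variant then applies the user limit and user-limit factor. A small replay of the two formulas with invented numbers (not RM code):

    // Replays the LeafQueue formulas with illustrative inputs.
    public class AmLimitSketch {
      static final int DEFAULT_AM_RESOURCE = 2 * 1024;   // MB, as in LeafQueue

      static int maxActiveApplications(int clusterMemoryMb, float amPercent,
          float absoluteCapacity) {
        return Math.max(
            (int) ((clusterMemoryMb / DEFAULT_AM_RESOURCE) * amPercent * absoluteCapacity),
            1);
      }

      static int maxActiveApplicationsPerUser(int maxActive, int userLimit,
          float userLimitFactor) {
        return (int) (maxActive * (userLimit / 100.0f) * userLimitFactor);
      }

      public static void main(String[] args) {
        // 100 nodes x 8 GB = 819200 MB, the default 10% AM share,
        // a queue at 50% absolute capacity, user-limit 100, user-limit-factor 1:
        int maxActive = maxActiveApplications(819200, 0.1f, 0.5f);
        int perUser = maxActiveApplicationsPerUser(maxActive, 100, 1.0f);
        System.out.println(maxActive + " active apps, " + perUser + " per user");
        // prints: 20 active apps, 20 per user
      }
    }

Applications beyond that limit stay in pendingApplications until activateApplications(), introduced later in this hunk, promotes them.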
+ */ + @Private + public float getMinimumAllocationFactor() { + return minimumAllocationFactor; + } + + public int getMaxApplications() { + return maxApplications; + } + + public int getMaxApplicationsPerUser() { + return maxApplicationsPerUser; + } + + public int getMaximumActiveApplications() { + return maxActiveApplications; + } + + public int getMaximumActiveApplicationsPerUser() { + return maxActiveApplicationsPerUser; + } + @Override public synchronized float getUsedCapacity() { return usedCapacity; @@ -299,10 +385,34 @@ synchronized void setParentQueue(Queue parent) { this.parent = parent; } + @Override public synchronized int getNumApplications() { - return applications.size(); + return getNumPendingApplications() + getNumActiveApplications(); } + public synchronized int getNumPendingApplications() { + return pendingApplications.size(); + } + + public synchronized int getNumActiveApplications() { + return activeApplications.size(); + } + + @Private + public synchronized int getNumApplications(String user) { + return getUser(user).getTotalApplications(); + } + + @Private + public synchronized int getNumPendingApplications(String user) { + return getUser(user).getPendingApplications(); + } + + @Private + public synchronized int getNumActiveApplications(String user) { + return getUser(user).getActiveApplications(); + } + public synchronized int getNumContainers() { return numContainers; } @@ -312,6 +422,16 @@ public synchronized QueueState getState() { return state; } + @Private + public int getUserLimit() { + return userLimit; + } + + @Private + public float getUserLimitFactor() { + return userLimitFactor; + } + @Override public synchronized Map getQueueAcls() { return new HashMap(acls); @@ -374,6 +494,8 @@ public synchronized void reinitialize(Queue queue, Resource clusterResource) leafQueue.maximumCapacity, leafQueue.absoluteMaxCapacity, leafQueue.userLimit, leafQueue.userLimitFactor, leafQueue.maxApplications, leafQueue.maxApplicationsPerUser, + leafQueue.maxActiveApplications, + leafQueue.maxActiveApplicationsPerUser, leafQueue.state, leafQueue.acls); updateResource(clusterResource); @@ -413,7 +535,7 @@ public void submitApplication(SchedulerApp application, String userName, synchronized (this) { // Check if the queue is accepting jobs - if (state != QueueState.RUNNING) { + if (getState() != QueueState.RUNNING) { String msg = "Queue " + getQueuePath() + " is STOPPED. 
Cannot accept submission of application: " + application.getApplicationId(); @@ -422,7 +544,7 @@ public void submitApplication(SchedulerApp application, String userName, } // Check submission limits for queues - if (getNumApplications() >= maxApplications) { + if (getNumApplications() >= getMaxApplications()) { String msg = "Queue " + getQueuePath() + " already has " + getNumApplications() + " applications," + " cannot accept submission of application: " + @@ -433,9 +555,9 @@ public void submitApplication(SchedulerApp application, String userName, // Check submission limits for the user on this queue user = getUser(userName); - if (user.getApplications() >= maxApplicationsPerUser) { + if (user.getTotalApplications() >= getMaxApplicationsPerUser()) { String msg = "Queue " + getQueuePath() + - " already has " + user.getApplications() + + " already has " + user.getTotalApplications() + " applications from user " + userName + " cannot accept submission of application: " + application.getApplicationId(); @@ -460,17 +582,46 @@ public void submitApplication(SchedulerApp application, String userName, } } + private synchronized void activateApplications() { + for (Iterator i=pendingApplications.iterator(); + i.hasNext(); ) { + SchedulerApp application = i.next(); + + // Check queue limit + if (getNumActiveApplications() >= getMaximumActiveApplications()) { + break; + } + + // Check user limit + User user = getUser(application.getUser()); + if (user.getActiveApplications() < getMaximumActiveApplicationsPerUser()) { + user.activateApplication(); + activeApplications.add(application); + i.remove(); + LOG.info("Application " + application.getApplicationId().getId() + + " from user: " + application.getUser() + + " activated in queue: " + getQueueName()); + } + } + } + private synchronized void addApplication(SchedulerApp application, User user) { // Accept user.submitApplication(); - applications.add(application); + pendingApplications.add(application); applicationsMap.put(application.getApplicationAttemptId(), application); + // Activate applications + activateApplications(); + LOG.info("Application added -" + " appId: " + application.getApplicationId() + " user: " + user + "," + " leaf-queue: " + getQueueName() + - " #user-applications: " + user.getApplications() + - " #queue-applications: " + getNumApplications()); + " #user-pending-applications: " + user.getPendingApplications() + + " #user-active-applications: " + user.getActiveApplications() + + " #queue-pending-applications: " + getNumPendingApplications() + + " #queue-active-applications: " + getNumActiveApplications() + ); } @Override @@ -485,20 +636,26 @@ public void finishApplication(SchedulerApp application, String queue) { } public synchronized void removeApplication(SchedulerApp application, User user) { - applications.remove(application); + activeApplications.remove(application); applicationsMap.remove(application.getApplicationAttemptId()); user.finishApplication(); - if (user.getApplications() == 0) { + if (user.getTotalApplications() == 0) { users.remove(application.getUser()); } + // Check if we can activate more applications + activateApplications(); + LOG.info("Application removed -" + " appId: " + application.getApplicationId() + " user: " + application.getUser() + " queue: " + getQueueName() + - " #user-applications: " + user.getApplications() + - " #queue-applications: " + getNumApplications()); + " #user-pending-applications: " + user.getPendingApplications() + + " #user-active-applications: " + user.getActiveApplications() + + 
" #queue-pending-applications: " + getNumPendingApplications() + + " #queue-active-applications: " + getNumActiveApplications() + ); } private synchronized SchedulerApp getApplication( @@ -512,7 +669,7 @@ private synchronized SchedulerApp getApplication( LOG.info("DEBUG --- assignContainers:" + " node=" + node.getHostName() + - " #applications=" + applications.size()); + " #applications=" + activeApplications.size()); // Check for reserved resources RMContainer reservedContainer = node.getReservedContainer(); @@ -524,7 +681,7 @@ private synchronized SchedulerApp getApplication( } // Try to assign containers to applications in order - for (SchedulerApp application : applications) { + for (SchedulerApp application : activeApplications) { LOG.info("DEBUG --- pre-assignContainers for application " + application.getApplicationId()); @@ -536,25 +693,24 @@ private synchronized SchedulerApp getApplication( setUserResourceLimit(application, userLimit); for (Priority priority : application.getPriorities()) { + // Required resource + Resource required = + application.getResourceRequest(priority, RMNode.ANY).getCapability(); // Do we need containers at this 'priority'? - if (!needContainers(application, priority)) { + if (!needContainers(application, priority, required)) { continue; } // Are we going over limits by allocating to this application? - ResourceRequest required = - application.getResourceRequest(priority, RMNode.ANY); - // Maximum Capacity of the queue - if (!assignToQueue(clusterResource, required.getCapability())) { + if (!assignToQueue(clusterResource, required)) { return Resources.none(); } // User limits userLimit = - computeUserLimit(application, clusterResource, - required.getCapability()); + computeUserLimit(application, clusterResource, required); if (!assignToUser(application.getUser(), userLimit)) { break; } @@ -732,10 +888,32 @@ private static int divideAndCeil(int a, int b) { return (a + (b - 1)) / b; } - boolean needContainers(SchedulerApp application, Priority priority) { + boolean needContainers(SchedulerApp application, Priority priority, Resource required) { int requiredContainers = application.getTotalRequiredResources(priority); int reservedContainers = application.getNumReservedContainers(priority); - return ((requiredContainers - reservedContainers) > 0); + int starvation = 0; + if (reservedContainers > 0) { + float nodeFactor = + ((float)required.getMemory() / getMaximumAllocation().getMemory()); + + // Use percentage of node required to bias against large containers... 
+ // Protect against corner case where you need the whole node with + // Math.min(nodeFactor, minimumAllocationFactor) + starvation = + (int)((application.getReReservations(priority) / reservedContainers) * + (1.0f - (Math.min(nodeFactor, getMinimumAllocationFactor()))) + ); + + if (LOG.isDebugEnabled()) { + LOG.debug("needsContainers:" + + " app.#re-reserve=" + application.getReReservations(priority) + + " reserved=" + reservedContainers + + " nodeFactor=" + nodeFactor + + " minAllocFactor=" + minimumAllocationFactor + + " starvation=" + starvation); + } + } + return (((starvation + requiredContainers) - reservedContainers) > 0); } private Resource assignContainersOnNode(Resource clusterResource, @@ -1068,7 +1246,16 @@ synchronized void releaseResource(Resource clusterResource, } @Override - public synchronized void updateResource(Resource clusterResource) { + public synchronized void updateClusterResource(Resource clusterResource) { + maxActiveApplications = + computeMaxActiveApplications(clusterResource, maxAMResourcePercent, + absoluteCapacity); + maxActiveApplicationsPerUser = + computeMaxActiveApplicationsPerUser(maxActiveApplications, userLimit, + userLimitFactor); + } + + private synchronized void updateResource(Resource clusterResource) { float queueLimit = clusterResource.getMemory() * absoluteCapacity; setUtilization(usedResources.getMemory() / queueLimit); setUsedCapacity( @@ -1087,22 +1274,36 @@ public QueueMetrics getMetrics() { static class User { Resource consumed = Resources.createResource(0); - int applications = 0; + int pendingApplications = 0; + int activeApplications = 0; public Resource getConsumedResources() { return consumed; } - public int getApplications() { - return applications; + public int getPendingApplications() { + return pendingApplications; } + public int getActiveApplications() { + return activeApplications; + } + + public int getTotalApplications() { + return getPendingApplications() + getActiveApplications(); + } + public synchronized void submitApplication() { - ++applications; + ++pendingApplications; + } + + public synchronized void activateApplication() { + --pendingApplications; + ++activeApplications; } public synchronized void finishApplication() { - --applications; + --activeApplications; } public synchronized void assignContainer(Resource resource) { @@ -1124,4 +1325,5 @@ public void recoverContainer(Resource clusterResource, parent.recoverContainer(clusterResource, application, container); } + } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index 7aa37fc9d1..4be8522c5e 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -646,7 +646,14 @@ synchronized void releaseResource(Resource clusterResource, } @Override - public synchronized void updateResource(Resource clusterResource) { + public synchronized void updateClusterResource(Resource clusterResource) { + // Update all 
children + for (Queue childQueue : childQueues) { + childQueue.updateClusterResource(clusterResource); + } + } + + private synchronized void updateResource(Resource clusterResource) { float queueLimit = clusterResource.getMemory() * absoluteCapacity; setUtilization(usedResources.getMemory() / queueLimit); setUsedCapacity( diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/Queue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/Queue.java index 4bd486e9c6..446ff8f822 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/Queue.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/Queue.java @@ -29,7 +29,6 @@ import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.api.records.QueueState; import org.apache.hadoop.yarn.api.records.Resource; -import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp; @@ -155,7 +154,7 @@ public void submitApplication(SchedulerApp application, String user, * Assign containers to applications in the queue or it's children (if any). * @param clusterResource the resource of the cluster. * @param node node on which resources are available - * @return + * @return the resource that is being assigned. 
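Returning to LeafQueue.needContainers() a few hunks above: the starvation term lets an application that keeps getting re-reserved on the same nodes eventually count part of its reservations as still needed, so it gets another chance to be scheduled elsewhere, while min(nodeFactor, minimumAllocationFactor) softens that push for requests that genuinely need most of a node. A standalone replay with invented numbers (not RM code):

    // Replays the starvation heuristic from LeafQueue.needContainers().
    public class StarvationSketch {
      static boolean needContainers(int requiredContainers, int reservedContainers,
          int reReservations, int requiredMemMb, int maxAllocMb, float minAllocFactor) {
        int starvation = 0;
        if (reservedContainers > 0) {
          float nodeFactor = (float) requiredMemMb / maxAllocMb;
          starvation = (int) ((reReservations / reservedContainers)
              * (1.0f - Math.min(nodeFactor, minAllocFactor)));
        }
        return ((starvation + requiredContainers) - reservedContainers) > 0;
      }

      public static void main(String[] args) {
        // 2 containers wanted, 2 already reserved, no re-reservations yet:
        System.out.println(needContainers(2, 2, 0, 1024, 8192, 0.875f));   // false
        // After 8 re-reservations of these small (1 GB of 8 GB) containers,
        // starvation = (8/2) * (1 - min(0.125, 0.875)) = (int) 3.5 = 3 > 0:
        System.out.println(needContainers(2, 2, 8, 1024, 8192, 0.875f));   // true
      }
    }

The 0.875 here assumes the minimumAllocationFactor computed in the constructor, (8192 - 1024) / 8192, for a 1 GB minimum and 8 GB maximum allocation.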
*/ public Resource assignContainers(Resource clusterResource, SchedulerNode node); @@ -191,7 +190,7 @@ public void reinitialize(Queue queue, Resource clusterResource) * Update the cluster resource for queues as we add/remove nodes * @param clusterResource the current cluster resource */ - public void updateResource(Resource clusterResource); + public void updateClusterResource(Resource clusterResource); /** * Recover the state of the queue diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java index be6c9048a2..06aea2c9a4 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java @@ -55,6 +55,8 @@ import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; +import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger; +import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store.RMState; import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources; @@ -225,8 +227,15 @@ public Allocation allocate( // Release containers for (ContainerId releasedContainer : release) { - containerCompleted(getRMContainer(releasedContainer), - RMContainerEventType.RELEASED); + RMContainer rmContainer = getRMContainer(releasedContainer); + if (rmContainer == null) { + RMAuditLogger.logFailure(application.getUser(), + AuditConstants.RELEASE_CONTAINER, + "Unauthorized access or invalid container", "FifoScheduler", + "Trying to release container not owned by app or with invalid id", + application.getApplicationId(), releasedContainer); + } + containerCompleted(rmContainer, RMContainerEventType.RELEASED); } if (!ask.isEmpty()) { @@ -642,6 +651,11 @@ private void containerLaunchedOnNode(Container container, SchedulerNode node) { @Lock(FifoScheduler.class) private synchronized void containerCompleted(RMContainer rmContainer, RMContainerEventType event) { + if (rmContainer == null) { + LOG.info("Null container completed..."); + return; + } + // Get the application for the finished container Container container = rmContainer.getContainer(); ApplicationAttemptId applicationAttemptId = container.getId().getAppAttemptId(); @@ -725,7 +739,7 @@ public synchronized SchedulerNodeReport getNodeReport(NodeId nodeId) { private RMContainer getRMContainer(ContainerId containerId) { SchedulerApp application = getApplication(containerId.getAppAttemptId()); - return application.getRMContainer(containerId); + return (application == null) ? 
null : application.getRMContainer(containerId); } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml index f6e2b0ce74..43a0437b9d 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml @@ -5,6 +5,11 @@ 10000
    + + yarn.capacity-scheduler.maximum-am-resource-percent + 0.1 + + yarn.capacity-scheduler.root.queues default diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java new file mode 100644 index 0000000000..9291b49aba --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java @@ -0,0 +1,244 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.resourcemanager; + +import java.net.InetAddress; +import java.net.InetSocketAddress; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.ipc.TestRPC.TestImpl; +import org.apache.hadoop.ipc.TestRPC.TestProtocol; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger; +import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants; +import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.Keys; + +import org.apache.hadoop.net.NetUtils; + +import static org.mockito.Mockito.*; +import static junit.framework.Assert.*; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + + +/** + * Tests {@link RMAuditLogger}. + */ +public class TestRMAuditLogger { + private static final Log LOG = LogFactory.getLog(TestRMAuditLogger.class); + private static final String USER = "test"; + private static final String OPERATION = "oper"; + private static final String TARGET = "tgt"; + private static final String PERM = "admin group"; + private static final String DESC = "description of an audit log"; + private static final ApplicationId APPID = mock(ApplicationId.class); + private static final ApplicationAttemptId ATTEMPTID = mock(ApplicationAttemptId.class); + private static final ContainerId CONTAINERID = mock(ContainerId.class); + + @Before + public void setUp() throws Exception { + when(APPID.toString()).thenReturn("app_1"); + when(ATTEMPTID.toString()).thenReturn("app_attempt_1"); + when(CONTAINERID.toString()).thenReturn("container_1"); + } + + + /** + * Test the AuditLog format with key-val pair. 
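The capacity-scheduler.xml hunk above ships the new yarn.capacity-scheduler.maximum-am-resource-percent property with a value of 0.1, which is what feeds maxAMResourcePercent in LeafQueue. A quick way to check the value a configuration actually resolves to; reading the file through a plain Configuration here is only an illustration of the property, not how the RM loads it:

    import org.apache.hadoop.conf.Configuration;

    // Prints the AM resource percentage resolved from capacity-scheduler.xml
    // (falls back to the 0.1 default used by CapacitySchedulerConfiguration).
    public class AmResourcePercentCheck {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.addResource("capacity-scheduler.xml");
        float pct = conf.getFloat(
            "yarn.capacity-scheduler.maximum-am-resource-percent", 0.1f);
        System.out.println("maximum-am-resource-percent = " + pct);
      }
    }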
+ */ + @Test + public void testKeyValLogFormat() throws Exception { + StringBuilder actLog = new StringBuilder(); + StringBuilder expLog = new StringBuilder(); + // add the first k=v pair and check + RMAuditLogger.start(Keys.USER, USER, actLog); + expLog.append("USER=test"); + assertEquals(expLog.toString(), actLog.toString()); + + // append another k1=v1 pair to already added k=v and test + RMAuditLogger.add(Keys.OPERATION, OPERATION, actLog); + expLog.append("\tOPERATION=oper"); + assertEquals(expLog.toString(), actLog.toString()); + + // append another k1=null pair and test + RMAuditLogger.add(Keys.APPID, (String)null, actLog); + expLog.append("\tAPPID=null"); + assertEquals(expLog.toString(), actLog.toString()); + + // now add the target and check of the final string + RMAuditLogger.add(Keys.TARGET, TARGET, actLog); + expLog.append("\tTARGET=tgt"); + assertEquals(expLog.toString(), actLog.toString()); + } + + + /** + * Test the AuditLog format for successful events. + */ + private void testSuccessLogFormatHelper(boolean checkIP, ApplicationId appId, + ApplicationAttemptId attemptId, ContainerId containerId) { + String sLog = RMAuditLogger.createSuccessLog(USER, OPERATION, TARGET, + appId, attemptId, containerId); + StringBuilder expLog = new StringBuilder(); + expLog.append("USER=test\t"); + if (checkIP) { + InetAddress ip = Server.getRemoteIp(); + expLog.append(Keys.IP.name() + "=" + ip.getHostAddress() + "\t"); + } + expLog.append("OPERATION=oper\tTARGET=tgt\tRESULT=SUCCESS"); + + if (appId != null) { + expLog.append("\tAPPID=app_1"); + } + if (attemptId != null) { + expLog.append("\tAPPATTEMPTID=app_attempt_1"); + } + if (containerId != null) { + expLog.append("\tCONTAINERID=container_1"); + } + assertEquals(expLog.toString(), sLog); + } + + /** + * Test the AuditLog format for successful events passing nulls. + */ + private void testSuccessLogNulls(boolean checkIP) { + String sLog = RMAuditLogger.createSuccessLog(null, null, null, null, + null, null); + StringBuilder expLog = new StringBuilder(); + expLog.append("USER=null\t"); + if (checkIP) { + InetAddress ip = Server.getRemoteIp(); + expLog.append(Keys.IP.name() + "=" + ip.getHostAddress() + "\t"); + } + expLog.append("OPERATION=null\tTARGET=null\tRESULT=SUCCESS"); + assertEquals(expLog.toString(), sLog); + } + + /** + * Test the AuditLog format for successful events with the various + * parameters. + */ + private void testSuccessLogFormat(boolean checkIP) { + testSuccessLogFormatHelper(checkIP, null, null, null); + testSuccessLogFormatHelper(checkIP, APPID, null, null); + testSuccessLogFormatHelper(checkIP, null, null, CONTAINERID); + testSuccessLogFormatHelper(checkIP, null, ATTEMPTID, null); + testSuccessLogFormatHelper(checkIP, APPID, ATTEMPTID, null); + testSuccessLogFormatHelper(checkIP, APPID, null, CONTAINERID); + testSuccessLogFormatHelper(checkIP, null, ATTEMPTID, CONTAINERID); + testSuccessLogFormatHelper(checkIP, APPID, ATTEMPTID, CONTAINERID); + testSuccessLogNulls(checkIP); + } + + + /** + * Test the AuditLog format for failure events. 
+ */ + private void testFailureLogFormatHelper(boolean checkIP, ApplicationId appId, + ApplicationAttemptId attemptId, ContainerId containerId) { + String fLog = + RMAuditLogger.createFailureLog(USER, OPERATION, PERM, TARGET, DESC, + appId, attemptId, containerId); + StringBuilder expLog = new StringBuilder(); + expLog.append("USER=test\t"); + if (checkIP) { + InetAddress ip = Server.getRemoteIp(); + expLog.append(Keys.IP.name() + "=" + ip.getHostAddress() + "\t"); + } + expLog.append("OPERATION=oper\tTARGET=tgt\tRESULT=FAILURE\t"); + expLog.append("DESCRIPTION=description of an audit log"); + expLog.append("\tPERMISSIONS=admin group"); + if (appId != null) { + expLog.append("\tAPPID=app_1"); + } + if (attemptId != null) { + expLog.append("\tAPPATTEMPTID=app_attempt_1"); + } + if (containerId != null) { + expLog.append("\tCONTAINERID=container_1"); + } + assertEquals(expLog.toString(), fLog); + } + + /** + * Test the AuditLog format for failure events with the various + * parameters. + */ + private void testFailureLogFormat(boolean checkIP) { + testFailureLogFormatHelper(checkIP, null, null, null); + testFailureLogFormatHelper(checkIP, APPID, null, null); + testFailureLogFormatHelper(checkIP, null, null, CONTAINERID); + testFailureLogFormatHelper(checkIP, null, ATTEMPTID, null); + testFailureLogFormatHelper(checkIP, APPID, ATTEMPTID, null); + testFailureLogFormatHelper(checkIP, APPID, null, CONTAINERID); + testFailureLogFormatHelper(checkIP, null, ATTEMPTID, CONTAINERID); + testFailureLogFormatHelper(checkIP, APPID, ATTEMPTID, CONTAINERID); + } + + /** + * Test {@link RMAuditLogger} without IP set. + */ + @Test + public void testRMAuditLoggerWithoutIP() throws Exception { + // test without ip + testSuccessLogFormat(false); + testFailureLogFormat(false); + } + + /** + * A special extension of {@link TestImpl} RPC server with + * {@link TestImpl#ping()} testing the audit logs. + */ + private class MyTestRPCServer extends TestImpl { + @Override + public void ping() { + // test with ip set + testSuccessLogFormat(true); + testFailureLogFormat(true); + } + } + + /** + * Test {@link RMAuditLogger} with IP set. 
+ */ + @Test + public void testRMAuditLoggerWithIP() throws Exception { + Configuration conf = new Configuration(); + // start the IPC server + Server server = RPC.getServer(new MyTestRPCServer(), "0.0.0.0", 0, conf); + server.start(); + + InetSocketAddress addr = NetUtils.getConnectAddress(server); + + // Make a client connection and test the audit log + TestProtocol proxy = (TestProtocol)RPC.getProxy(TestProtocol.class, + TestProtocol.versionID, addr, conf); + // Start the testcase + proxy.ping(); + + server.stop(); + } +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java index 048af90d39..6df30629ea 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java @@ -36,6 +36,9 @@ public class MockRMApp implements RMApp { RMAppState state = RMAppState.NEW; int failCount = 0; ApplicationId id; + String url = null; + StringBuilder diagnostics = new StringBuilder(); + RMAppAttempt attempt; public MockRMApp(int newid, long time, RMAppState newState) { finish = time; @@ -48,6 +51,11 @@ public MockRMApp(int newid, long time, RMAppState newState, String userName) { user = userName; } + public MockRMApp(int newid, long time, RMAppState newState, String userName, String diag) { + this(newid, time, newState, userName); + this.diagnostics = new StringBuilder(diag); + } + @Override public ApplicationId getApplicationId() { return id; @@ -58,11 +66,19 @@ public RMAppState getState() { return state; } + public void setState(RMAppState state) { + this.state = state; + } + @Override public String getUser() { return user; } + public void setUser(String user) { + this.user = user; + } + @Override public float getProgress() { return (float) 0.0; @@ -78,14 +94,26 @@ public String getQueue() { return queue; } + public void setQueue(String queue) { + this.queue = queue; + } + @Override public String getName() { return name; } + public void setName(String name) { + this.name = name; + } + @Override public RMAppAttempt getCurrentAppAttempt() { - throw new UnsupportedOperationException("Not supported yet."); + return attempt; + } + + public void setCurrentAppAttempt(RMAppAttempt attempt) { + this.attempt = attempt; } @Override @@ -103,19 +131,35 @@ public long getFinishTime() { return finish; } + public void setFinishTime(long time) { + this.finish = time; + } + @Override public long getStartTime() { return start; } + public void setStartTime(long time) { + this.start = time; + } + @Override public String getTrackingUrl() { - throw new UnsupportedOperationException("Not supported yet."); + return url; + } + + public void setTrackingUrl(String url) { + this.url = url; } @Override public StringBuilder getDiagnostics() { - throw new UnsupportedOperationException("Not supported yet."); + return diagnostics; + } + + public void setDiagnostics(String diag) { + this.diagnostics = new StringBuilder(diag); } public void handle(RMAppEvent event) { diff --git 
a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java new file mode 100644 index 0000000000..fe9b15b64f --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java @@ -0,0 +1,234 @@ +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +import static org.junit.Assert.*; +import static org.mockito.Mockito.*; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.QueueACL; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +public class TestApplicationLimits { + + private static final Log LOG = LogFactory.getLog(TestApplicationLimits.class); + final static int GB = 1024; + + LeafQueue queue; + + @Before + public void setUp() { + CapacitySchedulerConfiguration csConf = + new CapacitySchedulerConfiguration(); + setupQueueConfiguration(csConf); + + + CapacitySchedulerContext csContext = mock(CapacitySchedulerContext.class); + when(csContext.getConfiguration()).thenReturn(csConf); + when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB)); + when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16*GB)); + when(csContext.getClusterResources()).thenReturn(Resources.createResource(10 * 16 * GB)); + + Map queues = new HashMap(); + Queue root = + CapacityScheduler.parseQueue(csContext, csConf, null, "root", + queues, queues, + CapacityScheduler.queueComparator, + CapacityScheduler.applicationComparator, + TestUtils.spyHook); + + + queue = spy( + new LeafQueue(csContext, A, root, + CapacityScheduler.applicationComparator, null) + ); + + // Stub out ACL checks + doReturn(true). + when(queue).hasAccess(any(QueueACL.class), + any(UserGroupInformation.class)); + + // Some default values + doReturn(100).when(queue).getMaxApplications(); + doReturn(25).when(queue).getMaxApplicationsPerUser(); + doReturn(10).when(queue).getMaximumActiveApplications(); + doReturn(2).when(queue).getMaximumActiveApplicationsPerUser(); + } + + private static final String A = "a"; + private static final String B = "b"; + private void setupQueueConfiguration(CapacitySchedulerConfiguration conf) { + + // Define top-level queues + conf.setQueues(CapacityScheduler.ROOT, new String[] {A, B}); + conf.setCapacity(CapacityScheduler.ROOT, 100); + + final String Q_A = CapacityScheduler.ROOT + "." + A; + conf.setCapacity(Q_A, 10); + + final String Q_B = CapacityScheduler.ROOT + "." 
+ B; + conf.setCapacity(Q_B, 90); + + LOG.info("Setup top-level queues a and b"); + } + + private SchedulerApp getMockApplication(int appId, String user) { + SchedulerApp application = mock(SchedulerApp.class); + ApplicationAttemptId applicationAttemptId = + TestUtils.getMockApplicationAttemptId(appId, 0); + doReturn(applicationAttemptId.getApplicationId()). + when(application).getApplicationId(); + doReturn(applicationAttemptId). when(application).getApplicationAttemptId(); + doReturn(user).when(application).getUser(); + return application; + } + + @Test + public void testLimitsComputation() throws Exception { + CapacitySchedulerConfiguration csConf = + new CapacitySchedulerConfiguration(); + setupQueueConfiguration(csConf); + + + CapacitySchedulerContext csContext = mock(CapacitySchedulerContext.class); + when(csContext.getConfiguration()).thenReturn(csConf); + when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB)); + when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16*GB)); + + // Say cluster has 100 nodes of 16G each + Resource clusterResource = Resources.createResource(100 * 16 * GB); + when(csContext.getClusterResources()).thenReturn(clusterResource); + + Map queues = new HashMap(); + Queue root = + CapacityScheduler.parseQueue(csContext, csConf, null, "root", + queues, queues, + CapacityScheduler.queueComparator, + CapacityScheduler.applicationComparator, + TestUtils.spyHook); + + LeafQueue queue = (LeafQueue)queues.get(A); + + LOG.info("Queue 'A' -" + + " maxActiveApplications=" + queue.getMaximumActiveApplications() + + " maxActiveApplicationsPerUser=" + + queue.getMaximumActiveApplicationsPerUser()); + int expectedMaxActiveApps = + Math.max(1, + (int)((clusterResource.getMemory() / LeafQueue.DEFAULT_AM_RESOURCE) * + csConf.getMaximumApplicationMasterResourcePercent() * + queue.getAbsoluteCapacity())); + assertEquals(expectedMaxActiveApps, + queue.getMaximumActiveApplications()); + assertEquals((int)(expectedMaxActiveApps * (queue.getUserLimit() / 100.0f) * + queue.getUserLimitFactor()), + queue.getMaximumActiveApplicationsPerUser()); + + // Add some nodes to the cluster & test new limits + clusterResource = Resources.createResource(120 * 16 * GB); + root.updateClusterResource(clusterResource); + expectedMaxActiveApps = + Math.max(1, + (int)((clusterResource.getMemory() / LeafQueue.DEFAULT_AM_RESOURCE) * + csConf.getMaximumApplicationMasterResourcePercent() * + queue.getAbsoluteCapacity())); + assertEquals(expectedMaxActiveApps, + queue.getMaximumActiveApplications()); + assertEquals((int)(expectedMaxActiveApps * (queue.getUserLimit() / 100.0f) * + queue.getUserLimitFactor()), + queue.getMaximumActiveApplicationsPerUser()); + + } + + @Test + public void testActiveApplicationLimits() throws Exception { + final String user_0 = "user_0"; + final String user_1 = "user_1"; + + int APPLICATION_ID = 0; + // Submit first application + SchedulerApp app_0 = getMockApplication(APPLICATION_ID++, user_0); + queue.submitApplication(app_0, user_0, A); + assertEquals(1, queue.getNumActiveApplications()); + assertEquals(0, queue.getNumPendingApplications()); + assertEquals(1, queue.getNumActiveApplications(user_0)); + assertEquals(0, queue.getNumPendingApplications(user_0)); + + // Submit second application + SchedulerApp app_1 = getMockApplication(APPLICATION_ID++, user_0); + queue.submitApplication(app_1, user_0, A); + assertEquals(2, queue.getNumActiveApplications()); + assertEquals(0, queue.getNumPendingApplications()); + 
assertEquals(2, queue.getNumActiveApplications(user_0)); + assertEquals(0, queue.getNumPendingApplications(user_0)); + + // Submit third application, should remain pending + SchedulerApp app_2 = getMockApplication(APPLICATION_ID++, user_0); + queue.submitApplication(app_2, user_0, A); + assertEquals(2, queue.getNumActiveApplications()); + assertEquals(1, queue.getNumPendingApplications()); + assertEquals(2, queue.getNumActiveApplications(user_0)); + assertEquals(1, queue.getNumPendingApplications(user_0)); + + // Finish one application, app_2 should be activated + queue.finishApplication(app_0, A); + assertEquals(2, queue.getNumActiveApplications()); + assertEquals(0, queue.getNumPendingApplications()); + assertEquals(2, queue.getNumActiveApplications(user_0)); + assertEquals(0, queue.getNumPendingApplications(user_0)); + + // Submit another one for user_0 + SchedulerApp app_3 = getMockApplication(APPLICATION_ID++, user_0); + queue.submitApplication(app_3, user_0, A); + assertEquals(2, queue.getNumActiveApplications()); + assertEquals(1, queue.getNumPendingApplications()); + assertEquals(2, queue.getNumActiveApplications(user_0)); + assertEquals(1, queue.getNumPendingApplications(user_0)); + + // Change queue limit to be smaller so 2 users can fill it up + doReturn(3).when(queue).getMaximumActiveApplications(); + + // Submit first app for user_1 + SchedulerApp app_4 = getMockApplication(APPLICATION_ID++, user_1); + queue.submitApplication(app_4, user_1, A); + assertEquals(3, queue.getNumActiveApplications()); + assertEquals(1, queue.getNumPendingApplications()); + assertEquals(2, queue.getNumActiveApplications(user_0)); + assertEquals(1, queue.getNumPendingApplications(user_0)); + assertEquals(1, queue.getNumActiveApplications(user_1)); + assertEquals(0, queue.getNumPendingApplications(user_1)); + + // Submit second app for user_1, should block due to queue-limit + SchedulerApp app_5 = getMockApplication(APPLICATION_ID++, user_1); + queue.submitApplication(app_5, user_1, A); + assertEquals(3, queue.getNumActiveApplications()); + assertEquals(2, queue.getNumPendingApplications()); + assertEquals(2, queue.getNumActiveApplications(user_0)); + assertEquals(1, queue.getNumPendingApplications(user_0)); + assertEquals(1, queue.getNumActiveApplications(user_1)); + assertEquals(1, queue.getNumPendingApplications(user_1)); + + // Now finish one app of user_1 so app_5 should be activated + queue.finishApplication(app_4, A); + assertEquals(3, queue.getNumActiveApplications()); + assertEquals(1, queue.getNumPendingApplications()); + assertEquals(2, queue.getNumActiveApplications(user_0)); + assertEquals(1, queue.getNumPendingApplications(user_0)); + assertEquals(1, queue.getNumActiveApplications(user_1)); + assertEquals(0, queue.getNumPendingApplications(user_1)); + } + + @After + public void tearDown() { + + } +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index d60a4e8924..a3ac403306 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ 
b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -83,8 +83,12 @@ public void setUp() throws Exception { csContext = mock(CapacitySchedulerContext.class); when(csContext.getConfiguration()).thenReturn(csConf); - when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB)); - when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16*GB)); + when(csContext.getMinimumResourceCapability()). + thenReturn(Resources.createResource(GB)); + when(csContext.getMaximumResourceCapability()). + thenReturn(Resources.createResource(16*GB)); + when(csContext.getClusterResources()). + thenReturn(Resources.createResource(100 * 16 * GB)); root = CapacityScheduler.parseQueue(csContext, csConf, null, "root", queues, queues, @@ -447,7 +451,7 @@ public void testReservation() throws Exception { SchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 4*GB); final int numNodes = 1; - Resource clusterResource = Resources.createResource(numNodes * (8*GB)); + Resource clusterResource = Resources.createResource(numNodes * (4*GB)); when(csContext.getNumClusterNodes()).thenReturn(numNodes); // Setup resource-requests @@ -504,6 +508,121 @@ public void testReservation() throws Exception { assertEquals(4*GB, node_0.getUsedResource().getMemory()); } + @Test + public void testReservationExchange() throws Exception { + + // Manipulate queue 'a' + LeafQueue a = stubLeafQueue((LeafQueue)queues.get(A)); + a.setUserLimitFactor(10); + + // Users + final String user_0 = "user_0"; + final String user_1 = "user_1"; + + // Submit applications + final ApplicationAttemptId appAttemptId_0 = + TestUtils.getMockApplicationAttemptId(0, 0); + SchedulerApp app_0 = + new SchedulerApp(appAttemptId_0, user_0, a, rmContext, null); + a.submitApplication(app_0, user_0, A); + + final ApplicationAttemptId appAttemptId_1 = + TestUtils.getMockApplicationAttemptId(1, 0); + SchedulerApp app_1 = + new SchedulerApp(appAttemptId_1, user_1, a, rmContext, null); + a.submitApplication(app_1, user_1, A); + + // Setup some nodes + String host_0 = "host_0"; + SchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 4*GB); + + String host_1 = "host_1"; + SchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 4*GB); + + final int numNodes = 2; + Resource clusterResource = Resources.createResource(numNodes * (4*GB)); + when(csContext.getNumClusterNodes()).thenReturn(numNodes); + when(csContext.getMaximumResourceCapability()).thenReturn( + Resources.createResource(4*GB)); + when(a.getMaximumAllocation()).thenReturn(Resources.createResource(4*GB)); + when(a.getMinimumAllocationFactor()).thenReturn(0.25f); // 1G / 4G + + // Setup resource-requests + Priority priority = TestUtils.createMockPriority(1); + app_0.updateResourceRequests(Collections.singletonList( + TestUtils.createResourceRequest(RMNodeImpl.ANY, 1*GB, 2, priority, + recordFactory))); + + app_1.updateResourceRequests(Collections.singletonList( + TestUtils.createResourceRequest(RMNodeImpl.ANY, 4*GB, 1, priority, + recordFactory))); + + // Start testing... 
+ + // Only 1 container + a.assignContainers(clusterResource, node_0); + assertEquals(1*GB, a.getUsedResources().getMemory()); + assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + + // Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also + // you can get one container more than user-limit + a.assignContainers(clusterResource, node_0); + assertEquals(2*GB, a.getUsedResources().getMemory()); + assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + + // Now, reservation should kick in for app_1 + a.assignContainers(clusterResource, node_0); + assertEquals(6*GB, a.getUsedResources().getMemory()); + assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(4*GB, app_1.getCurrentReservation().getMemory()); + assertEquals(2*GB, node_0.getUsedResource().getMemory()); + + // Now free 1 container from app_0 i.e. 1G, and re-reserve it + a.completedContainer(clusterResource, app_0, node_0, + app_0.getLiveContainers().iterator().next(), RMContainerEventType.KILL); + a.assignContainers(clusterResource, node_0); + assertEquals(5*GB, a.getUsedResources().getMemory()); + assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(4*GB, app_1.getCurrentReservation().getMemory()); + assertEquals(1*GB, node_0.getUsedResource().getMemory()); + assertEquals(1, app_1.getReReservations(priority)); + + // Re-reserve + a.assignContainers(clusterResource, node_0); + assertEquals(5*GB, a.getUsedResources().getMemory()); + assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(4*GB, app_1.getCurrentReservation().getMemory()); + assertEquals(1*GB, node_0.getUsedResource().getMemory()); + assertEquals(2, app_1.getReReservations(priority)); + + // Try to schedule on node_1 now, should *move* the reservation + a.assignContainers(clusterResource, node_1); + assertEquals(9*GB, a.getUsedResources().getMemory()); + assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(4*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(4*GB, app_1.getCurrentReservation().getMemory()); + assertEquals(4*GB, node_1.getUsedResource().getMemory()); + // Doesn't change yet... 
only when reservation is cancelled or a different + // container is reserved + assertEquals(2, app_1.getReReservations(priority)); + + // Now finish another container from app_0 and see the reservation cancelled + a.completedContainer(clusterResource, app_0, node_0, + app_0.getLiveContainers().iterator().next(), RMContainerEventType.KILL); + a.assignContainers(clusterResource, node_0); + assertEquals(4*GB, a.getUsedResources().getMemory()); + assertEquals(0*GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(4*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(0*GB, app_1.getCurrentReservation().getMemory()); + assertEquals(0*GB, node_0.getUsedResource().getMemory()); + } + + @Test public void testLocalityScheduling() throws Exception { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java index 6894d857d1..ea635270e0 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java @@ -60,6 +60,8 @@ public void setUp() throws Exception { Resources.createResource(GB)); when(csContext.getMaximumResourceCapability()).thenReturn( Resources.createResource(16*GB)); + when(csContext.getClusterResources()). 
+ thenReturn(Resources.createResource(100 * 16 * GB)); } private static final String A = "a"; diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java index ae7853be67..0d59711578 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java @@ -116,6 +116,13 @@ public static ResourceRequest createResourceRequest( return request; } + public static ApplicationId getMockApplicationId(int appId) { + ApplicationId applicationId = mock(ApplicationId.class); + when(applicationId.getClusterTimestamp()).thenReturn(0L); + when(applicationId.getId()).thenReturn(appId); + return applicationId; + } + public static ApplicationAttemptId getMockApplicationAttemptId(int appId, int attemptId) { ApplicationId applicationId = mock(ApplicationId.class); diff --git a/hadoop-mapreduce-project/hadoop-yarn/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/pom.xml index b49601baaf..aad5e4a137 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/pom.xml +++ b/hadoop-mapreduce-project/hadoop-yarn/pom.xml @@ -56,7 +56,7 @@ org.apache.avro avro - 1.5.2 + 1.5.3 org.mortbay.jetty diff --git a/hadoop-mapreduce-project/ivy/ivysettings.xml b/hadoop-mapreduce-project/ivy/ivysettings.xml index 7631ec9226..ae606a221b 100644 --- a/hadoop-mapreduce-project/ivy/ivysettings.xml +++ b/hadoop-mapreduce-project/ivy/ivysettings.xml @@ -39,11 +39,11 @@ - + + checkmodified="true" changingPattern=".*SNAPSHOT" checkconsistency="false"/> - + diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml index f47a8a79a3..2ad5b3dec0 100644 --- a/hadoop-mapreduce-project/pom.xml +++ b/hadoop-mapreduce-project/pom.xml @@ -64,7 +64,7 @@ org.apache.avro avro - 1.5.2 + 1.5.3 org.mortbay.jetty diff --git a/hadoop-mapreduce-project/src/c++/task-controller/.autom4te.cfg b/hadoop-mapreduce-project/src/c++/task-controller/.autom4te.cfg deleted file mode 100644 index d21d1c9877..0000000000 --- a/hadoop-mapreduce-project/src/c++/task-controller/.autom4te.cfg +++ /dev/null @@ -1,42 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -# -# autom4te configuration for hadoop utils library -# - -begin-language: "Autoheader-preselections" -args: --no-cache -end-language: "Autoheader-preselections" - -begin-language: "Automake-preselections" -args: --no-cache -end-language: "Automake-preselections" - -begin-language: "Autoreconf-preselections" -args: --no-cache -end-language: "Autoreconf-preselections" - -begin-language: "Autoconf-without-aclocal-m4" -args: --no-cache -end-language: "Autoconf-without-aclocal-m4" - -begin-language: "Autoconf" -args: --no-cache -end-language: "Autoconf" - diff --git a/hadoop-mapreduce-project/src/c++/task-controller/Makefile.am b/hadoop-mapreduce-project/src/c++/task-controller/Makefile.am deleted file mode 100644 index 43ee05b5a8..0000000000 --- a/hadoop-mapreduce-project/src/c++/task-controller/Makefile.am +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -ACLOCAL_AMFLAGS = -I ../utils/m4 -AM_CFLAGS = -Wall - -bindir = $(exec_prefix) - -bin_PROGRAMS = task-controller -check_PROGRAMS = tests/test-task-controller -TESTS = $(check_PROGRAMS) - -task_controller_SOURCES = main.c task-controller.c configuration.c \ - task-controller.h - -tests_test_task_controller_SOURCES = tests/test-task-controller.c \ - task-controller.c configuration.c task-controller.h - -test: $(check_PROGRAMS) - @echo Done with $< diff --git a/hadoop-mapreduce-project/src/c++/task-controller/configuration.c b/hadoop-mapreduce-project/src/c++/task-controller/configuration.c deleted file mode 100644 index bbacbf7efc..0000000000 --- a/hadoop-mapreduce-project/src/c++/task-controller/configuration.c +++ /dev/null @@ -1,245 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "configuration.h" - - -char * hadoop_conf_dir; - -struct configuration config={.size=0, .confdetails=NULL}; - -//clean up method for freeing configuration -void free_configurations() { - int i = 0; - for (i = 0; i < config.size; i++) { - if (config.confdetails[i]->key != NULL) { - free((void *)config.confdetails[i]->key); - } - if (config.confdetails[i]->value != NULL) { - free((void *)config.confdetails[i]->value); - } - free(config.confdetails[i]); - } - if (config.size > 0) { - free(config.confdetails); - } - config.size = 0; -} - -//function used to load the configurations present in the secure config -void get_configs() { - FILE *conf_file; - char *line; - char *equaltok; - char *temp_equaltok; - size_t linesize = 1000; - int size_read = 0; - int str_len = 0; - char *file_name = NULL; - -#ifndef HADOOP_CONF_DIR - str_len = strlen(CONF_FILE_PATTERN) + strlen(hadoop_conf_dir); - file_name = (char *) malloc(sizeof(char) * (str_len + 1)); -#else - str_len = strlen(CONF_FILE_PATTERN) + strlen(HADOOP_CONF_DIR); - file_name = (char *) malloc(sizeof(char) * (str_len + 1)); -#endif - - if (file_name == NULL) { - fprintf(LOGFILE, "Malloc failed :Out of memory \n"); - return; - } - memset(file_name,'\0',str_len +1); -#ifndef HADOOP_CONF_DIR - snprintf(file_name,str_len, CONF_FILE_PATTERN, hadoop_conf_dir); -#else - snprintf(file_name, str_len, CONF_FILE_PATTERN, HADOOP_CONF_DIR); -#endif - -#ifdef DEBUG - fprintf(LOGFILE, "get_configs :Conf file name is : %s \n", file_name); -#endif - - //allocate space for ten configuration items. - config.confdetails = (struct confentry **) malloc(sizeof(struct confentry *) - * MAX_SIZE); - config.size = 0; - conf_file = fopen(file_name, "r"); - if (conf_file == NULL) { - fprintf(LOGFILE, "Invalid conf file provided : %s \n", file_name); - free(file_name); - return; - } - while(!feof(conf_file)) { - line = (char *) malloc(linesize); - if(line == NULL) { - fprintf(LOGFILE, "malloc failed while reading configuration file.\n"); - goto cleanup; - } - size_read = getline(&line,&linesize,conf_file); - //feof returns true only after we read past EOF. - //so a file with no new line, at last can reach this place - //if size_read returns negative check for eof condition - if (size_read == -1) { - if(!feof(conf_file)){ - fprintf(LOGFILE, "getline returned error.\n"); - goto cleanup; - }else { - break; - } - } - //trim the ending new line - line[strlen(line)-1] = '\0'; - //comment line - if(line[0] == '#') { - free(line); - continue; - } - //tokenize first to get key and list of values. 
- //if no equals is found ignore this line, can be an empty line also - equaltok = strtok_r(line, "=", &temp_equaltok); - if(equaltok == NULL) { - free(line); - continue; - } - config.confdetails[config.size] = (struct confentry *) malloc( - sizeof(struct confentry)); - if(config.confdetails[config.size] == NULL) { - fprintf(LOGFILE, - "Failed allocating memory for single configuration item\n"); - goto cleanup; - } - -#ifdef DEBUG - fprintf(LOGFILE, "get_configs : Adding conf key : %s \n", equaltok); -#endif - - memset(config.confdetails[config.size], 0, sizeof(struct confentry)); - config.confdetails[config.size]->key = (char *) malloc( - sizeof(char) * (strlen(equaltok)+1)); - strcpy((char *)config.confdetails[config.size]->key, equaltok); - equaltok = strtok_r(NULL, "=", &temp_equaltok); - if (equaltok == NULL) { - fprintf(LOGFILE, "configuration tokenization failed \n"); - goto cleanup; - } - //means value is commented so don't store the key - if(equaltok[0] == '#') { - free(line); - free((void *)config.confdetails[config.size]->key); - free(config.confdetails[config.size]); - continue; - } - -#ifdef DEBUG - fprintf(LOGFILE, "get_configs : Adding conf value : %s \n", equaltok); -#endif - - config.confdetails[config.size]->value = (char *) malloc( - sizeof(char) * (strlen(equaltok)+1)); - strcpy((char *)config.confdetails[config.size]->value, equaltok); - if((config.size + 1) % MAX_SIZE == 0) { - config.confdetails = (struct confentry **) realloc(config.confdetails, - sizeof(struct confentry **) * (MAX_SIZE + config.size)); - if (config.confdetails == NULL) { - fprintf(LOGFILE, - "Failed re-allocating memory for configuration items\n"); - goto cleanup; - } - } - if(config.confdetails[config.size] ) - config.size++; - free(line); - } - - //close the file - fclose(conf_file); - //clean up allocated file name - free(file_name); - return; - //free spaces alloced. - cleanup: - if (line != NULL) { - free(line); - } - fclose(conf_file); - free(file_name); - free_configurations(); - return; -} - -/* - * function used to get a configuration value. - * The function for the first time populates the configuration details into - * array, next time onwards used the populated array. - * - */ -const char * get_value(const char* key) { - int count; - if (config.size == 0) { - get_configs(); - } - if (config.size == 0) { - fprintf(LOGFILE, "Invalid configuration provided\n"); - return NULL; - } - for (count = 0; count < config.size; count++) { - if (strcmp(config.confdetails[count]->key, key) == 0) { - return strdup(config.confdetails[count]->value); - } - } - return NULL; -} - -/** - * Function to return an array of values for a key. - * Value delimiter is assumed to be a comma. 
- */ -const char ** get_values(const char * key) { - const char ** toPass = NULL; - const char *value = get_value(key); - char *tempTok = NULL; - char *tempstr = NULL; - int size = 0; - int len; - //first allocate any array of 10 - if(value != NULL) { - toPass = (const char **) malloc(sizeof(char *) * MAX_SIZE); - tempTok = strtok_r((char *)value, ",", &tempstr); - if (tempTok != NULL) { - while (1) { - toPass[size++] = tempTok; - tempTok = strtok_r(NULL, ",", &tempstr); - if(tempTok == NULL){ - break; - } - if((size % MAX_SIZE) == 0) { - toPass = (const char **) realloc(toPass,(sizeof(char *) * - (MAX_SIZE * ((size/MAX_SIZE) +1)))); - } - } - } else { - toPass[size] = (char *)value; - } - } - if(size > 0) { - toPass[size] = NULL; - } - return toPass; -} - diff --git a/hadoop-mapreduce-project/src/c++/task-controller/configuration.h b/hadoop-mapreduce-project/src/c++/task-controller/configuration.h deleted file mode 100644 index d1ee6d6337..0000000000 --- a/hadoop-mapreduce-project/src/c++/task-controller/configuration.h +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include -#include -#include -#include - -#define INCREMENT_SIZE 1000 -#define MAX_SIZE 10 - -struct confentry { - const char *key; - const char *value; -}; - - -struct configuration { - int size; - struct confentry **confdetails; -}; - -FILE *LOGFILE; - -#ifdef HADOOP_CONF_DIR - #define CONF_FILE_PATTERN "%s/taskcontroller.cfg" -#else - #define CONF_FILE_PATTERN "%s/conf/taskcontroller.cfg" -#endif - -extern struct configuration config; -//configuration file contents -#ifndef HADOOP_CONF_DIR - extern char *hadoop_conf_dir; -#endif -//method exposed to get the configurations -const char * get_value(const char* key); -//method to free allocated configuration -void free_configurations(); - -//function to return array of values pointing to the key. Values are -//comma seperated strings. -const char ** get_values(const char* key); diff --git a/hadoop-mapreduce-project/src/c++/task-controller/configure.ac b/hadoop-mapreduce-project/src/c++/task-controller/configure.ac deleted file mode 100644 index 86fff4f410..0000000000 --- a/hadoop-mapreduce-project/src/c++/task-controller/configure.ac +++ /dev/null @@ -1,68 +0,0 @@ -# -*- Autoconf -*- -# Process this file with autoconf to produce a configure script. - -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -AC_PREREQ(2.59) -AC_INIT([task-controller],[0.1]) - -#changing default prefix value to empty string, so that binary does not -#gets installed within system -AC_PREFIX_DEFAULT(.) - -#add new argument called -with-confdir -AC_ARG_WITH(confdir,[--with-confdir path to hadoop conf dir]) -AC_CONFIG_SRCDIR([task-controller.h]) -AC_CONFIG_AUX_DIR([config]) -AC_CONFIG_MACRO_DIR([../utils/m4]) -AM_INIT_AUTOMAKE([subdir-objects foreign no-dist]) - -# Checks for programs. -AC_PROG_CC - -# Checks for libraries. - -# Checks for header files. -AC_HEADER_STDC -AC_CHECK_HEADERS([stdlib.h string.h unistd.h fcntl.h]) - -#check for HADOOP_CONF_DIR - - -if test "$with_confdir" != "" -then -AC_DEFINE_UNQUOTED(HADOOP_CONF_DIR, ["$with_confdir"], [Location of Hadoop configuration]) -fi -# Checks for typedefs, structures, and compiler characteristics. -AC_C_CONST -AC_TYPE_PID_T -AC_TYPE_MODE_T -AC_TYPE_SIZE_T - -# Checks for library functions. -AC_FUNC_MALLOC -AC_FUNC_REALLOC -AC_FUNC_CHOWN -AC_CHECK_FUNCS([strerror memset mkdir rmdir strdup]) - -AC_CONFIG_FILES([Makefile]) -AC_OUTPUT - -AC_HEADER_STDBOOL -AC_PROG_MAKE_SET diff --git a/hadoop-mapreduce-project/src/c++/task-controller/main.c b/hadoop-mapreduce-project/src/c++/task-controller/main.c deleted file mode 100644 index 216417e3d9..0000000000 --- a/hadoop-mapreduce-project/src/c++/task-controller/main.c +++ /dev/null @@ -1,260 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "task-controller.h" - -void open_log_file(const char *log_file) { - if (log_file == NULL) { - LOGFILE = stdout; - } else { - LOGFILE = fopen(log_file, "a"); - if (LOGFILE == NULL) { - fprintf(stdout, "Unable to open LOGFILE : %s \n", log_file); - LOGFILE = stdout; - } - if (LOGFILE != stdout) { - if (chmod(log_file, S_IREAD | S_IEXEC | S_IWRITE | S_IROTH | S_IWOTH - | S_IRGRP | S_IWGRP) < 0) { - fprintf(stdout, "Unable to change permission of the log file %s \n", - log_file); - fclose(LOGFILE); - fprintf(stdout, "changing log file to stdout"); - LOGFILE = stdout; - } - } - } -} - -void display_usage(FILE *stream) { - fprintf(stream, - "Usage: task-controller [-l logfile] user command command-args\n"); -} - -/** - * Check the permissions on taskcontroller to make sure that security is - * promisable. 
For this, we need task-controller binary to - * * be user-owned by root - * * be group-owned by a configured special group. - * * others do not have write or execute permissions - * * be setuid - */ -int check_taskcontroller_permissions(char *executable_file) { - - errno = 0; - char * resolved_path = (char *) canonicalize_file_name(executable_file); - if (resolved_path == NULL) { - fprintf(LOGFILE, - "Error resolving the canonical name for the executable : %s!", - strerror(errno)); - return -1; - } - - struct stat filestat; - errno = 0; - if (stat(resolved_path, &filestat) != 0) { - fprintf(LOGFILE, "Could not stat the executable : %s!.\n", strerror(errno)); - return -1; - } - - uid_t binary_euid = filestat.st_uid; // Binary's user owner - gid_t binary_egid = filestat.st_gid; // Binary's group owner - - // Effective uid should be root - if (binary_euid != 0) { - fprintf(LOGFILE, - "The task-controller binary should be user-owned by root.\n"); - return -1; - } - - // Get the group entry for the special_group - errno = 0; - struct group *special_group_entry = getgrgid(binary_egid); - if (special_group_entry == NULL) { - fprintf(LOGFILE, - "Unable to get information for effective group of the binary : %s\n", - strerror(errno)); - return -1; - } - - char * binary_group = special_group_entry->gr_name; - // verify that the group name of the special group - // is same as the one in configuration - if (check_variable_against_config(TT_GROUP_KEY, binary_group) != 0) { - fprintf(LOGFILE, - "Group of the binary does not match with that in configuration\n"); - return -1; - } - - // check others do not have write/execute permissions - if ((filestat.st_mode & S_IWOTH) == S_IWOTH || - (filestat.st_mode & S_IXOTH) == S_IXOTH) { - fprintf(LOGFILE, - "The task-controller binary should not have write or execute for others.\n"); - return -1; - } - - // Binary should be setuid executable - if ((filestat.st_mode & S_ISUID) != S_ISUID) { - fprintf(LOGFILE, - "The task-controller binary should be set setuid.\n"); - return -1; - } - - return 0; -} - -int main(int argc, char **argv) { - int command; - int next_option = 0; - const char * job_id = NULL; - const char * task_id = NULL; - const char * tt_root = NULL; - const char *log_dir = NULL; - const char * unique_string = NULL; - int exit_code = 0; - const char * task_pid = NULL; - const char* const short_options = "l:"; - const struct option long_options[] = { { "log", 1, NULL, 'l' }, { NULL, 0, - NULL, 0 } }; - - const char* log_file = NULL; - char * dir_to_be_deleted = NULL; - int conf_dir_len = 0; - - char *executable_file = argv[0]; -#ifndef HADOOP_CONF_DIR - conf_dir_len = (strlen(executable_file) - strlen(EXEC_PATTERN)) + 1; - if (conf_dir_len < 1) { - // We didn't get an absolute path to our executable_file; bail. 
- printf("Cannot find configuration directory.\n"); - printf("This program must be run with its full absolute path.\n"); - return INVALID_CONF_DIR; - } else { - hadoop_conf_dir = (char *) malloc (sizeof(char) * conf_dir_len); - strncpy(hadoop_conf_dir, executable_file, - (strlen(executable_file) - strlen(EXEC_PATTERN))); - hadoop_conf_dir[(strlen(executable_file) - strlen(EXEC_PATTERN))] = '\0'; - } -#endif - do { - next_option = getopt_long(argc, argv, short_options, long_options, NULL); - switch (next_option) { - case 'l': - log_file = optarg; - default: - break; - } - } while (next_option != -1); - - open_log_file(log_file); - - if (check_taskcontroller_permissions(executable_file) != 0) { - fprintf(LOGFILE, "Invalid permissions on task-controller binary.\n"); - return INVALID_TASKCONTROLLER_PERMISSIONS; - } - - //Minimum number of arguments required to run the task-controller - //command-name user command tt-root - if (argc < 3) { - display_usage(stdout); - return INVALID_ARGUMENT_NUMBER; - } - - //checks done for user name - //checks done if the user is root or not. - if (argv[optind] == NULL) { - fprintf(LOGFILE, "Invalid user name \n"); - return INVALID_USER_NAME; - } - if (get_user_details(argv[optind]) != 0) { - return INVALID_USER_NAME; - } - //implicit conversion to int instead of __gid_t and __uid_t - if (user_detail->pw_gid == 0 || user_detail->pw_uid == 0) { - fprintf(LOGFILE, "Cannot run tasks as super user\n"); - return SUPER_USER_NOT_ALLOWED_TO_RUN_TASKS; - } - optind = optind + 1; - command = atoi(argv[optind++]); - - fprintf(LOGFILE, "main : command provided %d\n",command); - fprintf(LOGFILE, "main : user is %s\n", user_detail->pw_name); - - switch (command) { - case INITIALIZE_USER: - exit_code = initialize_user(user_detail->pw_name); - break; - case INITIALIZE_JOB: - job_id = argv[optind++]; - exit_code = initialize_job(job_id, user_detail->pw_name); - break; - case INITIALIZE_DISTRIBUTEDCACHE_FILE: - tt_root = argv[optind++]; - unique_string = argv[optind++]; - exit_code = initialize_distributed_cache_file(tt_root, unique_string, - user_detail->pw_name); - break; - case LAUNCH_TASK_JVM: - tt_root = argv[optind++]; - job_id = argv[optind++]; - task_id = argv[optind++]; - exit_code - = run_task_as_user(user_detail->pw_name, job_id, task_id, tt_root); - break; - case INITIALIZE_TASK: - job_id = argv[optind++]; - task_id = argv[optind++]; - exit_code = initialize_task(job_id, task_id, user_detail->pw_name); - break; - case TERMINATE_TASK_JVM: - task_pid = argv[optind++]; - exit_code = kill_user_task(user_detail->pw_name, task_pid, SIGTERM); - break; - case KILL_TASK_JVM: - task_pid = argv[optind++]; - exit_code = kill_user_task(user_detail->pw_name, task_pid, SIGKILL); - break; - case RUN_DEBUG_SCRIPT: - tt_root = argv[optind++]; - job_id = argv[optind++]; - task_id = argv[optind++]; - exit_code - = run_debug_script_as_user(user_detail->pw_name, job_id, task_id, tt_root); - break; - case SIGQUIT_TASK_JVM: - task_pid = argv[optind++]; - exit_code = kill_user_task(user_detail->pw_name, task_pid, SIGQUIT); - break; - case ENABLE_TASK_FOR_CLEANUP: - tt_root = argv[optind++]; - job_id = argv[optind++]; - dir_to_be_deleted = argv[optind++]; - exit_code = enable_task_for_cleanup(tt_root, user_detail->pw_name, job_id, - dir_to_be_deleted); - break; - case ENABLE_JOB_FOR_CLEANUP: - tt_root = argv[optind++]; - job_id = argv[optind++]; - exit_code = enable_job_for_cleanup(tt_root, user_detail->pw_name, job_id); - break; - default: - exit_code = INVALID_COMMAND_PROVIDED; - } - 
fflush(LOGFILE); - fclose(LOGFILE); - return exit_code; -} diff --git a/hadoop-mapreduce-project/src/c++/task-controller/task-controller.c b/hadoop-mapreduce-project/src/c++/task-controller/task-controller.c deleted file mode 100644 index eb0cbaa645..0000000000 --- a/hadoop-mapreduce-project/src/c++/task-controller/task-controller.c +++ /dev/null @@ -1,1300 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "task-controller.h" - -//struct to store the user details -struct passwd *user_detail = NULL; - -//LOGFILE -FILE *LOGFILE; - -//placeholder for global cleanup operations -void cleanup() { - free_configurations(); -} - -//change the user to passed user for executing/killing tasks -int change_user(const char * user) { - if (get_user_details(user) < 0) { - return -1; - } - - if(initgroups(user_detail->pw_name, user_detail->pw_gid) != 0) { - fprintf(LOGFILE, "unable to initgroups : %s\n", strerror(errno)); - cleanup(); - return SETUID_OPER_FAILED; - } - - errno = 0; - - setgid(user_detail->pw_gid); - if (errno != 0) { - fprintf(LOGFILE, "unable to setgid : %s\n", strerror(errno)); - cleanup(); - return SETUID_OPER_FAILED; - } - - setegid(user_detail->pw_gid); - if (errno != 0) { - fprintf(LOGFILE, "unable to setegid : %s\n", strerror(errno)); - cleanup(); - return SETUID_OPER_FAILED; - } - - setuid(user_detail->pw_uid); - if (errno != 0) { - fprintf(LOGFILE, "unable to setuid : %s\n", strerror(errno)); - cleanup(); - return SETUID_OPER_FAILED; - } - - seteuid(user_detail->pw_uid); - if (errno != 0) { - fprintf(LOGFILE, "unable to seteuid : %s\n", strerror(errno)); - cleanup(); - return SETUID_OPER_FAILED; - } - return 0; -} - -/** - * Checks the passed value for the variable config_key against the values in - * the configuration. - * Returns 0 if the passed value is found in the configuration, - * -1 otherwise - */ -int check_variable_against_config(const char *config_key, - const char *passed_value) { - - if (config_key == NULL || passed_value == NULL) { - return -1; - } - - int found = -1; - - const char **config_value = get_values(config_key); - - if (config_value == NULL) { - fprintf(LOGFILE, "%s is not configured.\n", config_key); - return -1; - } - - char *full_config_value = (char *)get_value(config_key); - - char **config_val_ptr = (char **) config_value; - while (*config_val_ptr != NULL) { - if (strcmp(*config_val_ptr, passed_value) == 0) { - found = 0; - break; - } - config_val_ptr++; - } - - if (found != 0) { - fprintf( - LOGFILE, - "Invalid value passed: \ - Configured value of %s is %s. 
\ - Passed value is %s.\n", - config_key, full_config_value, passed_value); - } - free(full_config_value); - free(config_value); - return found; -} - -/** - * Utility function to concatenate argB to argA using the concat_pattern - */ -char *concatenate(char *concat_pattern, char *return_path_name, int numArgs, - ...) { - va_list ap; - va_start(ap, numArgs); - int strlen_args = 0; - char *arg = NULL; - int j; - for (j = 0; j < numArgs; j++) { - arg = va_arg(ap, char*); - if (arg == NULL) { - fprintf(LOGFILE, "One of the arguments passed for %s in null.\n", - return_path_name); - return NULL; - } - strlen_args += strlen(arg); - } - va_end(ap); - - char *return_path = NULL; - int str_len = strlen(concat_pattern) + strlen_args; - - return_path = (char *) malloc(sizeof(char) * (str_len + 1)); - if (return_path == NULL) { - fprintf(LOGFILE, "Unable to allocate memory for %s.\n", return_path_name); - return NULL; - } - memset(return_path, '\0', str_len + 1); - va_start(ap, numArgs); - vsnprintf(return_path, str_len, concat_pattern, ap); - va_end(ap); - return return_path; -} - -/** - * Get the job-directory path from tt_root, user name and job-id - */ -char *get_job_directory(const char * tt_root, const char *user, - const char *jobid) { - return concatenate(TT_JOB_DIR_PATTERN, "job_dir_path", 3, tt_root, user, - jobid); -} - -/** - * Get the user directory of a particular user - */ -char *get_user_directory(const char *tt_root, const char *user) { - return concatenate(USER_DIR_PATTERN, "user_dir_path", 2, tt_root, user); -} - -/** - * Get the distributed cache directory for a particular user - */ -char *get_distributed_cache_directory(const char *tt_root, const char *user, - const char* unique_string) { - return concatenate(USER_DISTRIBUTED_CACHE_DIR_PATTERN, - "dist_cache_unique_path", 3, tt_root, user, unique_string); -} - -char *get_job_work_directory(const char *job_dir) { - return concatenate(JOB_DIR_TO_JOB_WORK_PATTERN, "job_work_dir_path", 2, - job_dir, ""); -} -/** - * Get the attempt directory for the given attempt_id - */ -char *get_attempt_directory(const char *job_dir, const char *attempt_id) { - return concatenate(JOB_DIR_TO_ATTEMPT_DIR_PATTERN, "attempt_dir_path", 2, - job_dir, attempt_id); -} - -/* - * Get the path to the task launcher file which is created by the TT - */ -char *get_task_launcher_file(const char *job_dir, const char *attempt_dir) { - return concatenate(TASK_SCRIPT_PATTERN, "task_script_path", 2, job_dir, - attempt_dir); -} - -/* - * Builds the full path of the dir(localTaskDir or localWorkDir) - * tt_root : is the base path(i.e. mapred-local-dir) sent to task-controller - * dir_to_be_deleted : is either taskDir($taskId) OR taskWorkDir($taskId/work) - */ -char *get_task_dir_path(const char *tt_root, const char *user, - const char *jobid, const char *dir_to_be_deleted) { - return concatenate(TT_LOCAL_TASK_DIR_PATTERN, "task_dir_full_path", 4, - tt_root, user, jobid, dir_to_be_deleted); -} - -/** - * Get the log directory for the given attempt. - */ -char *get_task_log_dir(const char *log_dir, const char *job_id, - const char *attempt_id) { - return concatenate(ATTEMPT_LOG_DIR_PATTERN, "task_log_dir", 3, log_dir, - job_id, attempt_id); -} - -/** - * Get the log directory for the given job. - */ -char *get_job_log_dir(const char *log_dir, const char *job_id) { - return concatenate(JOB_LOG_DIR_PATTERN, "job_log_dir", 2, log_dir, job_id); -} - -/** - * Get the job ACLs file for the given job log dir. 
- */ -char *get_job_acls_file(const char *log_dir) { - return concatenate(JOB_LOG_DIR_TO_JOB_ACLS_FILE_PATTERN, "job_acls_file", - 1, log_dir); -} - -/** - * Function to check if the passed tt_root is present in mapreduce.cluster.local.dir - * the task-controller is configured with. - */ -int check_tt_root(const char *tt_root) { - return check_variable_against_config(TT_SYS_DIR_KEY, tt_root); -} - -/** - * Function to check if the constructed path and absolute path of the task - * launcher file resolve to one and same. This is done so as to avoid - * security pitfalls because of relative path components in the file name. - */ -int check_path_for_relative_components(char *path) { - char * resolved_path = (char *) canonicalize_file_name(path); - if (resolved_path == NULL) { - fprintf(LOGFILE, - "Error resolving the path: %s. Passed path: %s\n", - strerror(errno), path); - return ERROR_RESOLVING_FILE_PATH; - } - if (strcmp(resolved_path, path) != 0) { - fprintf(LOGFILE, - "Relative path components in the path: %s. Resolved path: %s\n", - path, resolved_path); - free(resolved_path); - return RELATIVE_PATH_COMPONENTS_IN_FILE_PATH; - } - free(resolved_path); - return 0; -} - -/** - * Function to change the owner/group of a given path. - */ -static int change_owner(const char *path, uid_t uid, gid_t gid) { - int exit_code = chown(path, uid, gid); - if (exit_code != 0) { - fprintf(LOGFILE, "chown %d:%d for path %s failed: %s.\n", uid, gid, path, - strerror(errno)); - } - return exit_code; -} - -/** - * Function to change the mode of a given path. - */ -static int change_mode(const char *path, mode_t mode) { - int exit_code = chmod(path, mode); - if (exit_code != 0) { - fprintf(LOGFILE, "chmod %d of path %s failed: %s.\n", mode, path, - strerror(errno)); - } - return exit_code; -} - -/** - * Function to change permissions of the given path. It does the following - * recursively: - * 1) changes the owner/group of the paths to the passed owner/group - * 2) changes the file permission to the passed file_mode and directory - * permission to the passed dir_mode - * - * should_check_ownership : boolean to enable checking of ownership of each path - */ -static int secure_path(const char *path, uid_t uid, gid_t gid, - mode_t file_mode, mode_t dir_mode, int should_check_ownership) { - FTS *tree = NULL; // the file hierarchy - FTSENT *entry = NULL; // a file in the hierarchy - char *paths[] = { (char *) path, NULL };//array needs to be NULL-terminated - int process_path = 0; - int dir = 0; - int error_code = 0; - int done = 0; - - // Get physical locations and don't resolve the symlinks. - // Don't change directory while walking the directory. - int ftsoptions = FTS_PHYSICAL | FTS_NOCHDIR; - - tree = fts_open(paths, ftsoptions, NULL); - if (tree == NULL) { - fprintf(LOGFILE, - "Cannot open file traversal structure for the path %s:%s.\n", path, - strerror(errno)); - return -1; - } - - while (((entry = fts_read(tree)) != NULL) && !done) { - dir = 0; - switch (entry->fts_info) { - case FTS_D: - // A directory being visited in pre-order. - // We change ownership of directories in post-order. - // so ignore the pre-order visit. - process_path = 0; - break; - case FTS_DC: - // A directory that causes a cycle in the tree - // We don't expect cycles, ignore. - process_path = 0; - break; - case FTS_DNR: - // A directory which cannot be read - // Ignore and set error code. - process_path = 0; - error_code = -1; - break; - case FTS_DOT: - // "." or ".." 
- process_path = 0; - break; - case FTS_F: - // A regular file - process_path = 1; - break; - case FTS_DP: - // A directory being visited in post-order - if (entry->fts_level == 0) { - // root directory. Done with traversing. - done = 1; - } - process_path = 1; - dir = 1; - break; - case FTS_SL: - // A symbolic link - // We don't want to change-ownership(and set-permissions) for the file/dir - // pointed to by any symlink. - process_path = 0; - break; - case FTS_SLNONE: - // A symbolic link with a nonexistent target - process_path = 0; - break; - case FTS_NS: - // A file for which no stat(2) information was available - // Ignore and set error code - process_path = 0; - error_code = -1; - break; - case FTS_ERR: - // An error return. Ignore and set error code. - process_path = 0; - error_code = -1; - break; - case FTS_DEFAULT: - // File that doesn't belong to any of the above type. Ignore. - process_path = 0; - break; - default: - // None of the above. Ignore and set error code - process_path = 0; - error_code = -1; - } - - if (error_code != 0) { - break; - } - if (!process_path) { - continue; - } - error_code = secure_single_path(entry->fts_path, uid, gid, - (dir ? dir_mode : file_mode), should_check_ownership); - - } - if (fts_close(tree) != 0) { - fprintf(LOGFILE, "couldn't close file traversal structure:%s.\n", - strerror(errno)); - } - return error_code; -} - -/** - * Function to change ownership and permissions of the given path. - * This call sets ownership and permissions just for the path, not recursive. - */ -int secure_single_path(char *path, uid_t uid, gid_t gid, - mode_t perm, int should_check_ownership) { - int error_code = 0; - if (should_check_ownership && - (check_ownership(path, uid, gid) != 0)) { - fprintf(LOGFILE, - "Invalid file path. %s not user/group owned by the tasktracker.\n", path); - error_code = -1; - } else if (change_owner(path, uid, gid) != 0) { - fprintf(LOGFILE, "couldn't change the ownership of %s\n", path); - error_code = -3; - } else if (change_mode(path, perm) != 0) { - fprintf(LOGFILE, "couldn't change the permissions of %s\n", path); - error_code = -3; - } - return error_code; -} - -/** - * Function to prepare the attempt directories for the task JVM. - * This is done by changing the ownership of the attempt directory recursively - * to the job owner. We do the following: - * * sudo chown user:mapred -R taskTracker/$user/jobcache/$jobid/$attemptid/ - * * sudo chmod 2770 -R taskTracker/$user/jobcache/$jobid/$attemptid/ - */ -int prepare_attempt_directories(const char *job_id, const char *attempt_id, - const char *user) { - if (job_id == NULL || attempt_id == NULL || user == NULL) { - fprintf(LOGFILE, "Either attempt_id is null or the user passed is null.\n"); - return INVALID_ARGUMENT_NUMBER; - } - - gid_t tasktracker_gid = getegid(); // the group permissions of the binary. 
- - if (get_user_details(user) < 0) { - fprintf(LOGFILE, "Couldn't get the user details of %s.\n", user); - return INVALID_USER_NAME; - } - - char **local_dir = (char **) get_values(TT_SYS_DIR_KEY); - - if (local_dir == NULL) { - fprintf(LOGFILE, "%s is not configured.\n", TT_SYS_DIR_KEY); - cleanup(); - return PREPARE_ATTEMPT_DIRECTORIES_FAILED; - } - - char *full_local_dir_str = (char *) get_value(TT_SYS_DIR_KEY); -#ifdef DEBUG - fprintf(LOGFILE, "Value from config for %s is %s.\n", TT_SYS_DIR_KEY, - full_local_dir_str); -#endif - - char *job_dir; - char *attempt_dir; - char **local_dir_ptr = local_dir; - int failed = 0; - while (*local_dir_ptr != NULL) { - job_dir = get_job_directory(*local_dir_ptr, user, job_id); - if (job_dir == NULL) { - fprintf(LOGFILE, "Couldn't get job directory for %s.\n", job_id); - failed = 1; - break; - } - - // prepare attempt-dir in each of the mapreduce.cluster.local.dir - attempt_dir = get_attempt_directory(job_dir, attempt_id); - if (attempt_dir == NULL) { - fprintf(LOGFILE, "Couldn't get attempt directory for %s.\n", attempt_id); - failed = 1; - free(job_dir); - break; - } - - struct stat filestat; - if (stat(attempt_dir, &filestat) != 0) { - if (errno == ENOENT) { -#ifdef DEBUG - fprintf(LOGFILE, - "attempt_dir %s doesn't exist. Not doing anything.\n", attempt_dir); -#endif - } else { - // stat failed because of something else! - fprintf(LOGFILE, "Failed to stat the attempt_dir %s\n", attempt_dir); - failed = 1; - free(attempt_dir); - free(job_dir); - break; - } - } else if (secure_path(attempt_dir, user_detail->pw_uid, - tasktracker_gid, S_IRWXU | S_IRWXG, S_ISGID | S_IRWXU | S_IRWXG, - 1) != 0) { - // No setgid on files and setgid on dirs, 770 - fprintf(LOGFILE, "Failed to secure the attempt_dir %s\n", attempt_dir); - failed = 1; - free(attempt_dir); - free(job_dir); - break; - } - - local_dir_ptr++; - free(attempt_dir); - free(job_dir); - } - free(local_dir); - free(full_local_dir_str); - - cleanup(); - if (failed) { - return PREPARE_ATTEMPT_DIRECTORIES_FAILED; - } - return 0; -} - -/** - * Function to prepare the job log dir(and job acls file in it) for the child. - * It gives the user ownership of the job's log-dir to the user and - * group ownership to the user running tasktracker(i.e. tt_user). - * - * * sudo chown user:mapred log-dir/userlogs/$jobid - * * if user is not $tt_user, - * * sudo chmod 2570 log-dir/userlogs/$jobid - * * else - * * sudo chmod 2770 log-dir/userlogs/$jobid - * * sudo chown user:mapred log-dir/userlogs/$jobid/job-acls.xml - * * if user is not $tt_user, - * * sudo chmod 2570 log-dir/userlogs/$jobid/job-acls.xml - * * else - * * sudo chmod 2770 log-dir/userlogs/$jobid/job-acls.xml - */ -int prepare_job_logs(const char *log_dir, const char *job_id, - mode_t permissions) { - - char *job_log_dir = get_job_log_dir(log_dir, job_id); - if (job_log_dir == NULL) { - fprintf(LOGFILE, "Couldn't get job log directory %s.\n", job_log_dir); - return -1; - } - - struct stat filestat; - if (stat(job_log_dir, &filestat) != 0) { - if (errno == ENOENT) { -#ifdef DEBUG - fprintf(LOGFILE, "job_log_dir %s doesn't exist. Not doing anything.\n", - job_log_dir); -#endif - free(job_log_dir); - return 0; - } else { - // stat failed because of something else! - fprintf(LOGFILE, "Failed to stat the job log dir %s\n", job_log_dir); - free(job_log_dir); - return -1; - } - } - - gid_t tasktracker_gid = getegid(); // the group permissions of the binary. 
- // job log directory should not be set permissions recursively - // because, on tt restart/reinit, it would contain directories of earlier run - if (secure_single_path(job_log_dir, user_detail->pw_uid, tasktracker_gid, - S_ISGID | permissions, 1) != 0) { - fprintf(LOGFILE, "Failed to secure the log_dir %s\n", job_log_dir); - free(job_log_dir); - return -1; - } - - //set ownership and permissions for job_log_dir/job-acls.xml, if exists. - char *job_acls_file = get_job_acls_file(job_log_dir); - if (job_acls_file == NULL) { - fprintf(LOGFILE, "Couldn't get job acls file %s.\n", job_acls_file); - free(job_log_dir); - return -1; - } - - struct stat filestat1; - if (stat(job_acls_file, &filestat1) != 0) { - if (errno == ENOENT) { -#ifdef DEBUG - fprintf(LOGFILE, "job_acls_file %s doesn't exist. Not doing anything.\n", - job_acls_file); -#endif - free(job_acls_file); - free(job_log_dir); - return 0; - } else { - // stat failed because of something else! - fprintf(LOGFILE, "Failed to stat the job_acls_file %s\n", job_acls_file); - free(job_acls_file); - free(job_log_dir); - return -1; - } - } - - if (secure_single_path(job_acls_file, user_detail->pw_uid, tasktracker_gid, - permissions, 1) != 0) { - fprintf(LOGFILE, "Failed to secure the job acls file %s\n", job_acls_file); - free(job_acls_file); - free(job_log_dir); - return -1; - } - free(job_acls_file); - free(job_log_dir); - return 0; -} - -/** - * Function to prepare the task logs for the child. It gives the user - * ownership of the attempt's log-dir to the user and group ownership to the - * user running tasktracker. - * * sudo chown user:mapred log-dir/userlogs/$jobid/$attemptid - * * sudo chmod -R 2770 log-dir/userlogs/$jobid/$attemptid - */ -int prepare_task_logs(const char *log_dir, const char *job_id, - const char *task_id) { - - char *task_log_dir = get_task_log_dir(log_dir, job_id, task_id); - if (task_log_dir == NULL) { - fprintf(LOGFILE, "Couldn't get task_log directory %s.\n", task_log_dir); - return -1; - } - - struct stat filestat; - if (stat(task_log_dir, &filestat) != 0) { - if (errno == ENOENT) { - // See TaskRunner.java to see that an absent log-dir doesn't fail the task. -#ifdef DEBUG - fprintf(LOGFILE, "task_log_dir %s doesn't exist. Not doing anything.\n", - task_log_dir); -#endif - free(task_log_dir); - return 0; - } else { - // stat failed because of something else! - fprintf(LOGFILE, "Failed to stat the task_log_dir %s\n", task_log_dir); - free(task_log_dir); - return -1; - } - } - - gid_t tasktracker_gid = getegid(); // the group permissions of the binary. - if (secure_path(task_log_dir, user_detail->pw_uid, tasktracker_gid, - S_IRWXU | S_IRWXG, S_ISGID | S_IRWXU | S_IRWXG, 1) != 0) { - // setgid on dirs but not files, 770. As of now, there are no files though - fprintf(LOGFILE, "Failed to secure the log_dir %s\n", task_log_dir); - free(task_log_dir); - return -1; - } - free(task_log_dir); - return 0; -} - -//function used to populate and user_details structure. -int get_user_details(const char *user) { - if (user_detail == NULL) { - user_detail = getpwnam(user); - if (user_detail == NULL) { - fprintf(LOGFILE, "Invalid user\n"); - return -1; - } - } - return 0; -} - -/* - * Function to check if the TaskTracker actually owns the file. - * Or it has right ownership already. - */ -int check_ownership(char *path, uid_t uid, gid_t gid) { - struct stat filestat; - if (stat(path, &filestat) != 0) { - return UNABLE_TO_STAT_FILE; - } - // check user/group. 
User should be TaskTracker user, group can either be - // TaskTracker's primary group or the special group to which binary's - // permissions are set. - // Or it can be the user/group owned by uid and gid passed. - if ((getuid() != filestat.st_uid || (getgid() != filestat.st_gid && getegid() - != filestat.st_gid)) && - ((uid != filestat.st_uid) || (gid != filestat.st_gid))) { - return FILE_NOT_OWNED_BY_TASKTRACKER; - } - return 0; -} - -/** - * Function to initialize the user directories of a user. - * It does the following: - * * sudo chown user:mapred -R taskTracker/$user - * * if user is not $tt_user, - * * sudo chmod 2570 -R taskTracker/$user - * * else // user is tt_user - * * sudo chmod 2770 -R taskTracker/$user - * This is done once per every user on the TaskTracker. - */ -int initialize_user(const char *user) { - - if (user == NULL) { - fprintf(LOGFILE, "user passed is null.\n"); - return INVALID_ARGUMENT_NUMBER; - } - - if (get_user_details(user) < 0) { - fprintf(LOGFILE, "Couldn't get the user details of %s", user); - return INVALID_USER_NAME; - } - - gid_t tasktracker_gid = getegid(); // the group permissions of the binary. - - char **local_dir = (char **) get_values(TT_SYS_DIR_KEY); - if (local_dir == NULL) { - fprintf(LOGFILE, "%s is not configured.\n", TT_SYS_DIR_KEY); - cleanup(); - return INVALID_TT_ROOT; - } - - char *full_local_dir_str = (char *) get_value(TT_SYS_DIR_KEY); -#ifdef DEBUG - fprintf(LOGFILE, "Value from config for %s is %s.\n", TT_SYS_DIR_KEY, - full_local_dir_str); -#endif - - int is_tt_user = (user_detail->pw_uid == getuid()); - - // for tt_user, set 770 permissions; otherwise set 570 - mode_t permissions = is_tt_user ? (S_IRWXU | S_IRWXG) - : (S_IRUSR | S_IXUSR | S_IRWXG); - char *user_dir; - char **local_dir_ptr = local_dir; - int failed = 0; - while (*local_dir_ptr != NULL) { - user_dir = get_user_directory(*local_dir_ptr, user); - if (user_dir == NULL) { - fprintf(LOGFILE, "Couldn't get userdir directory for %s.\n", user); - failed = 1; - break; - } - - struct stat filestat; - if (stat(user_dir, &filestat) != 0) { - if (errno == ENOENT) { -#ifdef DEBUG - fprintf(LOGFILE, "user_dir %s doesn't exist. Not doing anything.\n", - user_dir); -#endif - } else { - // stat failed because of something else! - fprintf(LOGFILE, "Failed to stat the user_dir %s\n", - user_dir); - failed = 1; - free(user_dir); - break; - } - } else if (secure_path(user_dir, user_detail->pw_uid, - tasktracker_gid, permissions, S_ISGID | permissions, 1) != 0) { - // No setgid on files and setgid on dirs, - // 770 for tt_user and 570 for any other user - fprintf(LOGFILE, "Failed to secure the user_dir %s\n", - user_dir); - failed = 1; - free(user_dir); - break; - } - - local_dir_ptr++; - free(user_dir); - } - free(local_dir); - free(full_local_dir_str); - cleanup(); - if (failed) { - return INITIALIZE_USER_FAILED; - } - return 0; -} - -/** - * Function to prepare the job directories for the task JVM. 
- * We do the following: - * * sudo chown user:mapred -R taskTracker/$user/jobcache/$jobid - * * sudo chown user:mapred -R logs/userlogs/$jobid - * * if user is not $tt_user, - * * sudo chmod 2570 -R taskTracker/$user/jobcache/$jobid - * * sudo chmod 2570 -R logs/userlogs/$jobid - * * else // user is tt_user - * * sudo chmod 2770 -R taskTracker/$user/jobcache/$jobid - * * sudo chmod 2770 -R logs/userlogs/$jobid - * * - * * For any user, sudo chmod 2770 taskTracker/$user/jobcache/$jobid/work - */ -int initialize_job(const char *jobid, const char *user) { - if (jobid == NULL || user == NULL) { - fprintf(LOGFILE, "Either jobid is null or the user passed is null.\n"); - return INVALID_ARGUMENT_NUMBER; - } - - if (get_user_details(user) < 0) { - fprintf(LOGFILE, "Couldn't get the user details of %s", user); - return INVALID_USER_NAME; - } - - gid_t tasktracker_gid = getegid(); // the group permissions of the binary. - - char **local_dir = (char **) get_values(TT_SYS_DIR_KEY); - if (local_dir == NULL) { - fprintf(LOGFILE, "%s is not configured.\n", TT_SYS_DIR_KEY); - cleanup(); - return INVALID_TT_ROOT; - } - - char *full_local_dir_str = (char *) get_value(TT_SYS_DIR_KEY); -#ifdef DEBUG - fprintf(LOGFILE, "Value from config for %s is %s.\n", TT_SYS_DIR_KEY, - full_local_dir_str); -#endif - - int is_tt_user = (user_detail->pw_uid == getuid()); - - // for tt_user, set 770 permissions; for any other user, set 570 for job-dir - mode_t permissions = is_tt_user ? (S_IRWXU | S_IRWXG) - : (S_IRUSR | S_IXUSR | S_IRWXG); - char *job_dir, *job_work_dir; - char **local_dir_ptr = local_dir; - int failed = 0; - while (*local_dir_ptr != NULL) { - job_dir = get_job_directory(*local_dir_ptr, user, jobid); - if (job_dir == NULL) { - fprintf(LOGFILE, "Couldn't get job directory for %s.\n", jobid); - failed = 1; - break; - } - - struct stat filestat; - if (stat(job_dir, &filestat) != 0) { - if (errno == ENOENT) { -#ifdef DEBUG - fprintf(LOGFILE, "job_dir %s doesn't exist. Not doing anything.\n", - job_dir); -#endif - } else { - // stat failed because of something else! - fprintf(LOGFILE, "Failed to stat the job_dir %s\n", job_dir); - failed = 1; - free(job_dir); - break; - } - } else if (secure_path(job_dir, user_detail->pw_uid, tasktracker_gid, - permissions, S_ISGID | permissions, 1) != 0) { - // No setgid on files and setgid on dirs, - // 770 for tt_user and 570 for any other user - fprintf(LOGFILE, "Failed to secure the job_dir %s\n", job_dir); - failed = 1; - free(job_dir); - break; - } else if (!is_tt_user) { - // For tt_user, we don't need this as we already set 2770 for - // job-work-dir because of "chmod -R" done above - job_work_dir = get_job_work_directory(job_dir); - if (job_work_dir == NULL) { - fprintf(LOGFILE, "Couldn't get job-work directory for %s.\n", jobid); - failed = 1; - break; - } - - // Set 2770 on the job-work directory - if (stat(job_work_dir, &filestat) != 0) { - if (errno == ENOENT) { -#ifdef DEBUG - fprintf(LOGFILE, - "job_work_dir %s doesn't exist. Not doing anything.\n", - job_work_dir); -#endif - free(job_work_dir); - } else { - // stat failed because of something else! 
- fprintf(LOGFILE, "Failed to stat the job_work_dir %s\n", - job_work_dir); - failed = 1; - free(job_work_dir); - free(job_dir); - break; - } - } else if (change_mode(job_work_dir, S_ISGID | S_IRWXU | S_IRWXG) != 0) { - fprintf(LOGFILE, - "couldn't change the permissions of job_work_dir %s\n", - job_work_dir); - failed = 1; - free(job_work_dir); - free(job_dir); - break; - } - } - - local_dir_ptr++; - free(job_dir); - } - free(local_dir); - free(full_local_dir_str); - int exit_code = 0; - if (failed) { - exit_code = INITIALIZE_JOB_FAILED; - goto cleanup; - } - - char *log_dir = (char *) get_value(TT_LOG_DIR_KEY); - if (log_dir == NULL) { - fprintf(LOGFILE, "Log directory is not configured.\n"); - exit_code = INVALID_TT_LOG_DIR; - goto cleanup; - } - - if (prepare_job_logs(log_dir, jobid, permissions) != 0) { - fprintf(LOGFILE, "Couldn't prepare job logs directory %s for %s.\n", - log_dir, jobid); - exit_code = PREPARE_JOB_LOGS_FAILED; - } - - cleanup: - // free configurations - cleanup(); - if (log_dir != NULL) { - free(log_dir); - } - return exit_code; -} - -/** - * Function to initialize the distributed cache file for a user. - * It does the following: - * * sudo chown user:mapred -R taskTracker/$user/distcache/ - * * if user is not $tt_user, - * * sudo chmod 2570 -R taskTracker/$user/distcache/ - * * else // user is tt_user - * * sudo chmod 2770 -R taskTracker/$user/distcache/ - * This is done once per localization. Tasks reusing JVMs just create - * symbolic links themselves and so there isn't anything specific to do in - * that case. - */ -int initialize_distributed_cache_file(const char *tt_root, - const char *unique_string, const char *user) { - if (tt_root == NULL) { - fprintf(LOGFILE, "tt_root passed is null.\n"); - return INVALID_ARGUMENT_NUMBER; - } - if (unique_string == NULL) { - fprintf(LOGFILE, "unique_string passed is null.\n"); - return INVALID_ARGUMENT_NUMBER; - } - - if (user == NULL) { - fprintf(LOGFILE, "user passed is null.\n"); - return INVALID_ARGUMENT_NUMBER; - } - - if (get_user_details(user) < 0) { - fprintf(LOGFILE, "Couldn't get the user details of %s", user); - return INVALID_USER_NAME; - } - //Check tt_root - if (check_tt_root(tt_root) < 0) { - fprintf(LOGFILE, "invalid tt root passed %s\n", tt_root); - cleanup(); - return INVALID_TT_ROOT; - } - - // set permission on the unique directory - char *localized_unique_dir = get_distributed_cache_directory(tt_root, user, - unique_string); - if (localized_unique_dir == NULL) { - fprintf(LOGFILE, "Couldn't get unique distcache directory for %s.\n", user); - cleanup(); - return INITIALIZE_DISTCACHEFILE_FAILED; - } - - gid_t binary_gid = getegid(); // the group permissions of the binary. - - int is_tt_user = (user_detail->pw_uid == getuid()); - - // for tt_user, set 770 permissions; for any other user, set 570 - mode_t permissions = is_tt_user ? 
(S_IRWXU | S_IRWXG) - : (S_IRUSR | S_IXUSR | S_IRWXG); - int failed = 0; - struct stat filestat; - if (stat(localized_unique_dir, &filestat) != 0) { - // stat on distcache failed because of something - fprintf(LOGFILE, "Failed to stat the localized_unique_dir %s\n", - localized_unique_dir); - failed = INITIALIZE_DISTCACHEFILE_FAILED; - } else if (secure_path(localized_unique_dir, user_detail->pw_uid, - binary_gid, permissions, S_ISGID | permissions, 1) != 0) { - // No setgid on files and setgid on dirs, - // 770 for tt_user and 570 for any other user - fprintf(LOGFILE, "Failed to secure the localized_unique_dir %s\n", - localized_unique_dir); - failed = INITIALIZE_DISTCACHEFILE_FAILED; - } - free(localized_unique_dir); - cleanup(); - return failed; -} - -/** - * Function used to initialize task. Prepares attempt_dir, jars_dir and - * log_dir to be accessible by the child - */ -int initialize_task(const char *jobid, const char *taskid, const char *user) { - int exit_code = 0; -#ifdef DEBUG - fprintf(LOGFILE, "job-id passed to initialize_task : %s.\n", jobid); - fprintf(LOGFILE, "task-d passed to initialize_task : %s.\n", taskid); -#endif - - if (prepare_attempt_directories(jobid, taskid, user) != 0) { - fprintf(LOGFILE, - "Couldn't prepare the attempt directories for %s of user %s.\n", - taskid, user); - exit_code = PREPARE_ATTEMPT_DIRECTORIES_FAILED; - goto cleanup; - } - - char *log_dir = (char *) get_value(TT_LOG_DIR_KEY); - if (log_dir == NULL) { - fprintf(LOGFILE, "Log directory is not configured.\n"); - exit_code = INVALID_TT_LOG_DIR; - goto cleanup; - } - - if (prepare_task_logs(log_dir, jobid, taskid) != 0) { - fprintf(LOGFILE, "Couldn't prepare task logs directory %s for %s.\n", - log_dir, taskid); - exit_code = PREPARE_TASK_LOGS_FAILED; - } - - cleanup: - // free configurations - cleanup(); - if (log_dir != NULL) { - free(log_dir); - } - return exit_code; -} - -/* - * Function used to launch a task as the provided user. - */ -int run_task_as_user(const char * user, const char *jobid, const char *taskid, - const char *tt_root) { - return run_process_as_user(user, jobid, taskid, tt_root, LAUNCH_TASK_JVM); -} - -/* - * Function that is used as a helper to launch task JVMs and debug scripts. - * Not meant for launching any other process. It does the following : - * 1) Checks if the tt_root passed is found in mapreduce.cluster.local.dir - * 2) Prepares attempt_dir and log_dir to be accessible by the task JVMs - * 3) Uses get_task_launcher_file to fetch the task script file path - * 4) Does an execlp on the same in order to replace the current image with - * task image. 
- */ -int run_process_as_user(const char * user, const char * jobid, -const char *taskid, const char *tt_root, int command) { - if (command != LAUNCH_TASK_JVM && command != RUN_DEBUG_SCRIPT) { - return INVALID_COMMAND_PROVIDED; - } - if (jobid == NULL || taskid == NULL || tt_root == NULL) { - return INVALID_ARGUMENT_NUMBER; - } - - if (command == LAUNCH_TASK_JVM) { - fprintf(LOGFILE, "run_process_as_user launching a JVM for task :%s.\n", taskid); - } else if (command == RUN_DEBUG_SCRIPT) { - fprintf(LOGFILE, "run_process_as_user launching a debug script for task :%s.\n", taskid); - } - -#ifdef DEBUG - fprintf(LOGFILE, "Job-id passed to run_process_as_user : %s.\n", jobid); - fprintf(LOGFILE, "task-d passed to run_process_as_user : %s.\n", taskid); - fprintf(LOGFILE, "tt_root passed to run_process_as_user : %s.\n", tt_root); -#endif - - //Check tt_root before switching the user, as reading configuration - //file requires privileged access. - if (check_tt_root(tt_root) < 0) { - fprintf(LOGFILE, "invalid tt root passed %s\n", tt_root); - cleanup(); - return INVALID_TT_ROOT; - } - - int exit_code = 0; - char *job_dir = NULL, *task_script_path = NULL; - - if (command == LAUNCH_TASK_JVM && - (exit_code = initialize_task(jobid, taskid, user)) != 0) { - fprintf(LOGFILE, "Couldn't initialise the task %s of user %s.\n", taskid, - user); - goto cleanup; - } - - job_dir = get_job_directory(tt_root, user, jobid); - if (job_dir == NULL) { - fprintf(LOGFILE, "Couldn't obtain job_dir for %s in %s.\n", jobid, tt_root); - exit_code = OUT_OF_MEMORY; - goto cleanup; - } - - task_script_path = get_task_launcher_file(job_dir, taskid); - if (task_script_path == NULL) { - fprintf(LOGFILE, "Couldn't obtain task_script_path in %s.\n", job_dir); - exit_code = OUT_OF_MEMORY; - goto cleanup; - } - - errno = 0; - exit_code = check_path_for_relative_components(task_script_path); - if(exit_code != 0) { - goto cleanup; - } - - //change the user - fcloseall(); - free(job_dir); - umask(0007); - if (change_user(user) != 0) { - exit_code = SETUID_OPER_FAILED; - goto cleanup; - } - - errno = 0; - cleanup(); - execlp(task_script_path, task_script_path, NULL); - if (errno != 0) { - free(task_script_path); - if (command == LAUNCH_TASK_JVM) { - fprintf(LOGFILE, "Couldn't execute the task jvm file: %s", strerror(errno)); - exit_code = UNABLE_TO_EXECUTE_TASK_SCRIPT; - } else if (command == RUN_DEBUG_SCRIPT) { - fprintf(LOGFILE, "Couldn't execute the task debug script file: %s", strerror(errno)); - exit_code = UNABLE_TO_EXECUTE_DEBUG_SCRIPT; - } - } - - return exit_code; - -cleanup: - if (job_dir != NULL) { - free(job_dir); - } - if (task_script_path != NULL) { - free(task_script_path); - } - // free configurations - cleanup(); - return exit_code; -} -/* - * Function used to launch a debug script as the provided user. - */ -int run_debug_script_as_user(const char * user, const char *jobid, const char *taskid, - const char *tt_root) { - return run_process_as_user(user, jobid, taskid, tt_root, RUN_DEBUG_SCRIPT); -} -/** - * Function used to terminate/kill a task launched by the user, - * or dump the process' stack (by sending SIGQUIT). - * The function sends appropriate signal to the process group - * specified by the task_pid. 
- */ -int kill_user_task(const char *user, const char *task_pid, int sig) { - int pid = 0; - - if(task_pid == NULL) { - return INVALID_ARGUMENT_NUMBER; - } - -#ifdef DEBUG - fprintf(LOGFILE, "user passed to kill_user_task : %s.\n", user); - fprintf(LOGFILE, "task-pid passed to kill_user_task : %s.\n", task_pid); - fprintf(LOGFILE, "signal passed to kill_user_task : %d.\n", sig); -#endif - - pid = atoi(task_pid); - - if(pid <= 0) { - return INVALID_TASK_PID; - } - - fcloseall(); - if (change_user(user) != 0) { - cleanup(); - return SETUID_OPER_FAILED; - } - - //Don't continue if the process-group is not alive anymore. - if(kill(-pid,0) < 0) { - errno = 0; - cleanup(); - return 0; - } - - if (kill(-pid, sig) < 0) { - if(errno != ESRCH) { - fprintf(LOGFILE, "Error is %s\n", strerror(errno)); - cleanup(); - return UNABLE_TO_KILL_TASK; - } - errno = 0; - } - cleanup(); - return 0; -} - -/** - * Enables the path for deletion by changing the owner, group and permissions - * of the specified path and all the files/directories in the path recursively. - * * sudo chown user:mapred -R full_path - * * sudo chmod 2770 -R full_path - * Before changing permissions, makes sure that the given path doesn't contain - * any relative components. - * tt_root : is the base path(i.e. mapred-local-dir) sent to task-controller - * full_path : is either jobLocalDir, taskDir OR taskWorkDir that is to be - * deleted - */ -static int enable_path_for_cleanup(const char *tt_root, const char *user, - char *full_path) { - int exit_code = 0; - gid_t tasktracker_gid = getegid(); // the group permissions of the binary. - - if (check_tt_root(tt_root) < 0) { - fprintf(LOGFILE, "invalid tt root passed %s\n", tt_root); - cleanup(); - return INVALID_TT_ROOT; - } - - if (full_path == NULL) { - fprintf(LOGFILE, - "Could not build the full path. Not deleting the dir %s\n", - full_path); - exit_code = UNABLE_TO_BUILD_PATH; // may be malloc failed - } - // Make sure that the path given is not having any relative components - else if ((exit_code = check_path_for_relative_components(full_path)) != 0) { - fprintf(LOGFILE, - "Not changing permissions. Path may contain relative components.\n", - full_path); - } - else if (get_user_details(user) < 0) { - fprintf(LOGFILE, "Couldn't get the user details of %s.\n", user); - exit_code = INVALID_USER_NAME; - } - else if (exit_code = secure_path(full_path, user_detail->pw_uid, - tasktracker_gid, - S_IRWXU | S_IRWXG, S_ISGID | S_IRWXU | S_IRWXG, 0) != 0) { - // No setgid on files and setgid on dirs, 770. - // set 770 permissions for user, TTgroup for all files/directories in - // 'full_path' recursively sothat deletion of path by TaskTracker succeeds. - - fprintf(LOGFILE, "Failed to set permissions for %s\n", full_path); - } - - if (full_path != NULL) { - free(full_path); - } - // free configurations - cleanup(); - return exit_code; -} - -/** - * Enables the task work-dir/local-dir path for deletion. - * tt_root : is the base path(i.e. mapred-local-dir) sent to task-controller - * dir_to_be_deleted : is either taskDir OR taskWorkDir that is to be deleted - */ -int enable_task_for_cleanup(const char *tt_root, const char *user, - const char *jobid, const char *dir_to_be_deleted) { - char *full_path = get_task_dir_path(tt_root, user, jobid, dir_to_be_deleted); - return enable_path_for_cleanup(tt_root, user, full_path); -} - -/** - * Enables the jobLocalDir for deletion. - * tt_root : is the base path(i.e. 
mapred-local-dir) sent to task-controller - * user : owner of the job - * jobid : id of the job for which the cleanup is needed. - */ -int enable_job_for_cleanup(const char *tt_root, const char *user, - const char *jobid) { - char *full_path = get_job_directory(tt_root, user, jobid); - return enable_path_for_cleanup(tt_root, user, full_path); -} diff --git a/hadoop-mapreduce-project/src/c++/task-controller/task-controller.h b/hadoop-mapreduce-project/src/c++/task-controller/task-controller.h deleted file mode 100644 index 55d1221875..0000000000 --- a/hadoop-mapreduce-project/src/c++/task-controller/task-controller.h +++ /dev/null @@ -1,148 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "configuration.h" - -//command definitions -enum command { - INITIALIZE_USER, - INITIALIZE_JOB, - INITIALIZE_DISTRIBUTEDCACHE_FILE, - LAUNCH_TASK_JVM, - INITIALIZE_TASK, - TERMINATE_TASK_JVM, - KILL_TASK_JVM, - RUN_DEBUG_SCRIPT, - SIGQUIT_TASK_JVM, - ENABLE_TASK_FOR_CLEANUP, - ENABLE_JOB_FOR_CLEANUP -}; - -enum errorcodes { - INVALID_ARGUMENT_NUMBER = 1, - INVALID_USER_NAME, //2 - INVALID_COMMAND_PROVIDED, //3 - SUPER_USER_NOT_ALLOWED_TO_RUN_TASKS, //4 - INVALID_TT_ROOT, //5 - SETUID_OPER_FAILED, //6 - UNABLE_TO_EXECUTE_TASK_SCRIPT, //7 - UNABLE_TO_KILL_TASK, //8 - INVALID_TASK_PID, //9 - ERROR_RESOLVING_FILE_PATH, //10 - RELATIVE_PATH_COMPONENTS_IN_FILE_PATH, //11 - UNABLE_TO_STAT_FILE, //12 - FILE_NOT_OWNED_BY_TASKTRACKER, //13 - PREPARE_ATTEMPT_DIRECTORIES_FAILED, //14 - INITIALIZE_JOB_FAILED, //15 - PREPARE_TASK_LOGS_FAILED, //16 - INVALID_TT_LOG_DIR, //17 - OUT_OF_MEMORY, //18 - INITIALIZE_DISTCACHEFILE_FAILED, //19 - INITIALIZE_USER_FAILED, //20 - UNABLE_TO_EXECUTE_DEBUG_SCRIPT, //21 - INVALID_CONF_DIR, //22 - UNABLE_TO_BUILD_PATH, //23 - INVALID_TASKCONTROLLER_PERMISSIONS, //24 - PREPARE_JOB_LOGS_FAILED, //25 -}; - -#define USER_DIR_PATTERN "%s/taskTracker/%s" - -#define TT_JOB_DIR_PATTERN USER_DIR_PATTERN"/jobcache/%s" - -#define USER_DISTRIBUTED_CACHE_DIR_PATTERN USER_DIR_PATTERN"/distcache/%s" - -#define JOB_DIR_TO_JOB_WORK_PATTERN "%s/work" - -#define JOB_DIR_TO_ATTEMPT_DIR_PATTERN "%s/%s" - -#define JOB_LOG_DIR_PATTERN "%s/userlogs/%s" - -#define JOB_LOG_DIR_TO_JOB_ACLS_FILE_PATTERN "%s/job-acls.xml" - -#define ATTEMPT_LOG_DIR_PATTERN JOB_LOG_DIR_PATTERN"/%s" - -#define TASK_SCRIPT_PATTERN "%s/%s/taskjvm.sh" - -#define TT_LOCAL_TASK_DIR_PATTERN "%s/taskTracker/%s/jobcache/%s/%s" - -#define TT_SYS_DIR_KEY "mapreduce.cluster.local.dir" - -#define TT_LOG_DIR_KEY "hadoop.log.dir" - -#define TT_GROUP_KEY "mapreduce.tasktracker.group" - -#ifndef HADOOP_CONF_DIR 
- #define EXEC_PATTERN "/bin/task-controller" - extern char * hadoop_conf_dir; -#endif - -extern struct passwd *user_detail; - -extern FILE *LOGFILE; - -int run_task_as_user(const char * user, const char *jobid, const char *taskid, - const char *tt_root); - -int run_debug_script_as_user(const char * user, const char *jobid, const char *taskid, - const char *tt_root); - -int initialize_user(const char *user); - -int initialize_task(const char *jobid, const char *taskid, const char *user); - -int initialize_job(const char *jobid, const char *user); - -int initialize_distributed_cache_file(const char *tt_root, - const char* unique_string, const char *user); - -int kill_user_task(const char *user, const char *task_pid, int sig); - -int enable_task_for_cleanup(const char *tt_root, const char *user, - const char *jobid, const char *dir_to_be_deleted); - -int enable_job_for_cleanup(const char *tt_root, const char *user, - const char *jobid); - -int prepare_attempt_directory(const char *attempt_dir, const char *user); - -// The following functions are exposed for testing - -int check_variable_against_config(const char *config_key, - const char *passed_value); - -int get_user_details(const char *user); - -char *get_task_launcher_file(const char *job_dir, const char *attempt_dir); diff --git a/hadoop-mapreduce-project/src/c++/task-controller/tests/test-task-controller.c b/hadoop-mapreduce-project/src/c++/task-controller/tests/test-task-controller.c deleted file mode 100644 index d6c2531db5..0000000000 --- a/hadoop-mapreduce-project/src/c++/task-controller/tests/test-task-controller.c +++ /dev/null @@ -1,243 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "../task-controller.h" - -#define HADOOP_CONF_DIR "/tmp" - -int write_config_file(char *file_name) { - FILE *file; - char const *str = - "mapreduce.cluster.local.dir=/tmp/testing1,/tmp/testing2,/tmp/testing3,/tmp/testing4\n"; - - file = fopen(file_name, "w"); - if (file == NULL) { - printf("Failed to open %s.\n", file_name); - return EXIT_FAILURE; - } - fwrite(str, 1, strlen(str), file); - fclose(file); - return 0; -} - -void test_check_variable_against_config() { - - // A temporary configuration directory - char *conf_dir_templ = "/tmp/test-task-controller-conf-dir-XXXXXX"; - - // To accomodate "/conf/taskcontroller.cfg" - char template[strlen(conf_dir_templ) + strlen("/conf/taskcontroller.cfg")]; - - strcpy(template, conf_dir_templ); - char *temp_dir = mkdtemp(template); - if (temp_dir == NULL) { - printf("Couldn't create a temporary dir for conf.\n"); - goto cleanup; - } - - // Set the configuration directory - hadoop_conf_dir = strdup(temp_dir); - - // create the configuration directory - strcat(template, "/conf"); - char *conf_dir = strdup(template); - mkdir(conf_dir, S_IRWXU); - - // create the configuration file - strcat(template, "/taskcontroller.cfg"); - if (write_config_file(template) != 0) { - printf("Couldn't write the configuration file.\n"); - goto cleanup; - } - - // Test obtaining a value for a key from the config - char *config_values[4] = { "/tmp/testing1", "/tmp/testing2", - "/tmp/testing3", "/tmp/testing4" }; - char *value = (char *) get_value("mapreduce.cluster.local.dir"); - if (strcmp(value, "/tmp/testing1,/tmp/testing2,/tmp/testing3,/tmp/testing4") - != 0) { - printf("Obtaining a value for a key from the config failed.\n"); - goto cleanup; - } - - // Test the parsing of a multiple valued key from the config - char **values = (char **)get_values("mapreduce.cluster.local.dir"); - char **values_ptr = values; - int i = 0; - while (*values_ptr != NULL) { - printf(" value : %s\n", *values_ptr); - if (strcmp(*values_ptr, config_values[i++]) != 0) { - printf("Configured values are not read out properly. Test failed!"); - goto cleanup;; - } - values_ptr++; - } - - if (check_variable_against_config("mapreduce.cluster.local.dir", "/tmp/testing5") == 0) { - printf("Configuration should not contain /tmp/testing5! \n"); - goto cleanup; - } - - if (check_variable_against_config("mapreduce.cluster.local.dir", "/tmp/testing4") != 0) { - printf("Configuration should contain /tmp/testing4! 
\n"); - goto cleanup; - } - - cleanup: if (value != NULL) { - free(value); - } - if (values != NULL) { - free(values); - } - if (hadoop_conf_dir != NULL) { - free(hadoop_conf_dir); - } - unlink(template); - rmdir(conf_dir); - rmdir(hadoop_conf_dir); -} - -void test_get_user_directory() { - char *user_dir = (char *) get_user_directory("/tmp", "user"); - printf("user_dir obtained is %s\n", user_dir); - int ret = 0; - if (strcmp(user_dir, "/tmp/taskTracker/user") != 0) { - ret = -1; - } - free(user_dir); - assert(ret == 0); -} - -void test_get_job_directory() { - char *job_dir = (char *) get_job_directory("/tmp", "user", - "job_200906101234_0001"); - printf("job_dir obtained is %s\n", job_dir); - int ret = 0; - if (strcmp(job_dir, "/tmp/taskTracker/user/jobcache/job_200906101234_0001") - != 0) { - ret = -1; - } - free(job_dir); - assert(ret == 0); -} - -void test_get_attempt_directory() { - char *job_dir = (char *) get_job_directory("/tmp", "user", - "job_200906101234_0001"); - printf("job_dir obtained is %s\n", job_dir); - char *attempt_dir = (char *) get_attempt_directory(job_dir, - "attempt_200906101234_0001_m_000000_0"); - printf("attempt_dir obtained is %s\n", attempt_dir); - int ret = 0; - if (strcmp( - attempt_dir, - "/tmp/taskTracker/user/jobcache/job_200906101234_0001/attempt_200906101234_0001_m_000000_0") - != 0) { - ret = -1; - } - free(job_dir); - free(attempt_dir); - assert(ret == 0); -} - -void test_get_task_launcher_file() { - char *job_dir = (char *) get_job_directory("/tmp", "user", - "job_200906101234_0001"); - char *task_file = (char *) get_task_launcher_file(job_dir, - "attempt_200906112028_0001_m_000000_0"); - printf("task_file obtained is %s\n", task_file); - int ret = 0; - if (strcmp( - task_file, - "/tmp/taskTracker/user/jobcache/job_200906101234_0001/attempt_200906112028_0001_m_000000_0/taskjvm.sh") - != 0) { - ret = -1; - } - free(task_file); - assert(ret == 0); -} - -void test_get_job_log_dir() { - char *logdir = (char *) get_job_log_dir("/tmp/testing", - "job_200906101234_0001"); - printf("logdir obtained is %s\n", logdir); - int ret = 0; - if (strcmp(logdir, "/tmp/testing/userlogs/job_200906101234_0001") != 0) { - ret = -1; - } - free(logdir); - assert(ret == 0); -} - -void test_get_job_acls_file() { - char *job_acls_file = (char *) get_job_acls_file( - "/tmp/testing/userlogs/job_200906101234_0001"); - printf("job acls file obtained is %s\n", job_acls_file); - int ret = 0; - if (strcmp(job_acls_file, - "/tmp/testing/userlogs/job_200906101234_0001/job-acls.xml") != 0) { - ret = -1; - } - free(job_acls_file); - assert(ret == 0); -} - -void test_get_task_log_dir() { - char *logdir = (char *) get_task_log_dir("/tmp/testing", - "job_200906101234_0001", "attempt_200906112028_0001_m_000000_0"); - printf("logdir obtained is %s\n", logdir); - int ret = 0; - if (strcmp(logdir, - "/tmp/testing/userlogs/job_200906101234_0001/attempt_200906112028_0001_m_000000_0") - != 0) { - ret = -1; - } - free(logdir); - assert(ret == 0); -} - -int main(int argc, char **argv) { - printf("\nStarting tests\n"); - LOGFILE = stdout; - - printf("\nTesting check_variable_against_config()\n"); - test_check_variable_against_config(); - - printf("\nTesting get_user_directory()\n"); - test_get_user_directory(); - - printf("\nTesting get_job_directory()\n"); - test_get_job_directory(); - - printf("\nTesting get_attempt_directory()\n"); - test_get_attempt_directory(); - - printf("\nTesting get_task_launcher_file()\n"); - test_get_task_launcher_file(); - - printf("\nTesting get_job_log_dir()\n"); - 
test_get_job_log_dir(); - - printf("\nTesting get_job_acls_file()\n"); - test_get_job_acls_file(); - - printf("\nTesting get_task_log_dir()\n"); - test_get_task_log_dir(); - - printf("\nFinished tests\n"); - return 0; -} diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java b/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java index 337deaeedf..8cde11d9cd 100644 --- a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java +++ b/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java @@ -31,7 +31,7 @@ import org.apache.commons.logging.Log; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.SocketOutputStream; @@ -389,7 +389,7 @@ public long sendBlock(DataOutputStream out, OutputStream baseStream) streamForSendChunks = baseStream; // assure a mininum buffer size. - maxChunksPerPacket = (Math.max(FSConstants.IO_FILE_BUFFER_SIZE, + maxChunksPerPacket = (Math.max(HdfsConstants.IO_FILE_BUFFER_SIZE, MIN_BUFFER_WITH_TRANSFERTO) + bytesPerChecksum - 1)/bytesPerChecksum; @@ -397,7 +397,7 @@ public long sendBlock(DataOutputStream out, OutputStream baseStream) pktSize += checksumSize * maxChunksPerPacket; } else { maxChunksPerPacket = Math.max(1, - (FSConstants.IO_FILE_BUFFER_SIZE + bytesPerChecksum - 1)/bytesPerChecksum); + (HdfsConstants.IO_FILE_BUFFER_SIZE + bytesPerChecksum - 1)/bytesPerChecksum); pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket; } diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/BlockFixer.java b/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/BlockFixer.java index 56ca58ebbe..863784a9c7 100644 --- a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/BlockFixer.java +++ b/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/BlockFixer.java @@ -46,11 +46,11 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.*; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.server.common.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.FSDataset; import org.apache.hadoop.hdfs.server.datanode.RaidBlockSender; import org.apache.commons.logging.Log; @@ -741,17 +741,17 @@ private void sendFixedBlock(DatanodeInfo datanode, int readTimeout = getConf().getInt(BLOCKFIX_READ_TIMEOUT, - HdfsConstants.READ_TIMEOUT); + HdfsServerConstants.READ_TIMEOUT); NetUtils.connect(sock, target, readTimeout); sock.setSoTimeout(readTimeout); int writeTimeout = getConf().getInt(BLOCKFIX_WRITE_TIMEOUT, - HdfsConstants.WRITE_TIMEOUT); + 
HdfsServerConstants.WRITE_TIMEOUT); OutputStream baseStream = NetUtils.getOutputStream(sock, writeTimeout); DataOutputStream out = new DataOutputStream(new BufferedOutputStream(baseStream, - FSConstants. + HdfsConstants. SMALL_BUFFER_SIZE)); boolean corruptChecksumOk = false; diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingAsDifferentUser.java b/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingAsDifferentUser.java deleted file mode 100644 index 5843269d66..0000000000 --- a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingAsDifferentUser.java +++ /dev/null @@ -1,178 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.streaming; - -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.security.PrivilegedExceptionAction; - -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.mapred.ClusterWithLinuxTaskController; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.security.Groups; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.Shell; -import org.apache.hadoop.util.ToolRunner; - -/** - * Test Streaming with LinuxTaskController running the jobs as a user different - * from the user running the cluster. 
See {@link ClusterWithLinuxTaskController} - */ -public class TestStreamingAsDifferentUser extends - ClusterWithLinuxTaskController { - - private Path inputPath = new Path("input"); - private Path outputPath = new Path("output"); - private String input = "roses.are.red\nviolets.are.blue\nbunnies.are.pink\n"; - private String map = - UtilTest.makeJavaCommand(TrApp.class, new String[] { ".", "\\n" }); - private String reduce = - UtilTest.makeJavaCommand(UniqApp.class, new String[] { "R" }); - - public void testStreaming() - throws Exception { - if (!shouldRun()) { - return; - } - startCluster(); - final JobConf myConf = getClusterConf(); - jobOwner.doAs(new PrivilegedExceptionAction() { - public Void run() throws IOException{ - - FileSystem inFs = inputPath.getFileSystem(myConf); - FileSystem outFs = outputPath.getFileSystem(myConf); - outFs.delete(outputPath, true); - if (!inFs.mkdirs(inputPath)) { - throw new IOException("Mkdirs failed to create " + inFs.toString()); - } - DataOutputStream file = inFs.create(new Path(inputPath, "part-0")); - file.writeBytes(input); - file.close(); - final String[] args = - new String[] { "-input", inputPath.makeQualified(inFs).toString(), - "-output", outputPath.makeQualified(outFs).toString(), "-mapper", - map, "-reducer", reduce, "-jobconf", - "mapreduce.task.files.preserve.failedtasks=true", "-jobconf", - "stream.tmpdir=" + System.getProperty("test.build.data", "/tmp") }; - - StreamJob streamJob = new StreamJob(args, true); - streamJob.setConf(myConf); - assertTrue("Job has not succeeded", streamJob.go() == 0); - assertOwnerShip(outputPath); - return null; - } - }); - } - - /** - * Verify if the permissions of distcache dir contents are valid once the job - * is finished - */ - public void testStreamingWithDistCache() - throws Exception { - if (!shouldRun()) { - return; - } - startCluster(); - final String[] localDirs = mrCluster.getTaskTrackerLocalDirs(0); - final JobConf myConf = getClusterConf(); - - // create file that will go into public distributed cache - File publicFile = new File(System.getProperty( - "test.build.data", "/tmp"), "publicFile"); - FileOutputStream fstream = new FileOutputStream(publicFile); - fstream.write("public file contents".getBytes()); - fstream.close(); - - // put the file(that should go into public dist cache) in dfs and set - // read and exe permissions for others - FileSystem dfs = dfsCluster.getFileSystem(); - dfs.setPermission(new Path(dfs.getDefaultUri(myConf).toString() + "/tmp"), - new FsPermission((short)0755)); - final String publicCacheFile = dfs.getDefaultUri(myConf).toString() - + "/tmp/publicFile"; - dfs.copyFromLocalFile(new Path(publicFile.getAbsolutePath()), - new Path(publicCacheFile)); - dfs.setPermission(new Path(publicCacheFile), new FsPermission((short)0755)); - final String taskTrackerUser - = UserGroupInformation.getCurrentUser().getShortUserName(); - - jobOwner.doAs(new PrivilegedExceptionAction() { - public Void run() throws Exception{ - - FileSystem inFs = inputPath.getFileSystem(myConf); - FileSystem outFs = outputPath.getFileSystem(myConf); - outFs.delete(outputPath, true); - if (!inFs.mkdirs(inputPath)) { - throw new IOException("Mkdirs failed to create " + inFs.toString()); - } - - // create input file - DataOutputStream file = inFs.create(new Path(inputPath, "part-0")); - file.writeBytes(input); - file.close(); - - // Create file that will be passed using -files option. 
- // This is private dist cache file - File privateFile = new File(System.getProperty( - "test.build.data", "/tmp"), "test.sh"); - privateFile.createNewFile(); - - String[] args = - new String[] { - "-files", privateFile.toString() + "," + publicCacheFile, - "-Dmapreduce.task.files.preserve.failedtasks=true", - "-Dstream.tmpdir=" + System.getProperty("test.build.data", "/tmp"), - "-input", inputPath.makeQualified(inFs).toString(), - "-output", outputPath.makeQualified(outFs).toString(), - "-mapper", "pwd", - "-reducer", StreamJob.REDUCE_NONE - }; - StreamJob streamJob = new StreamJob(); - streamJob.setConf(myConf); - - assertTrue("Job failed", ToolRunner.run(streamJob, args)==0); - - // validate private cache files' permissions - checkPermissionsOnPrivateDistCache(localDirs, - jobOwner.getShortUserName(), taskTrackerUser, - taskTrackerSpecialGroup); - - // check the file is present even after the job is over. - // work directory symlink cleanup should not have removed the target - // files. - checkPresenceOfPrivateDistCacheFiles(localDirs, - jobOwner.getShortUserName(), new String[] {"test.sh"}); - - // validate private cache files' permissions - checkPermissionsOnPublicDistCache(FileSystem.getLocal(myConf), - localDirs, taskTrackerUser, taskTrackerPrimaryGroup); - - checkPresenceOfPublicDistCacheFiles(localDirs, - new String[] {"publicFile"}); - assertOwnerShip(outputPath); - return null; - } - }); - } -} diff --git a/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobTracker.java b/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobTracker.java index 979a030d38..1209164231 100644 --- a/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobTracker.java +++ b/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobTracker.java @@ -1483,7 +1483,7 @@ public int compare(TaskTrackerStatus p1, TaskTrackerStatus p2) { taskScheduler = (TaskScheduler) ReflectionUtils.newInstance(schedulerClass, conf); int handlerCount = conf.getInt(JT_IPC_HANDLER_COUNT, 10); - this.interTrackerServer = RPC.getServer(ClientProtocol.class, + this.interTrackerServer = RPC.getServer(JobTracker.class, // All protocols in JobTracker this, addr.getHostName(), addr.getPort(), handlerCount, diff --git a/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/LinuxTaskController.java b/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/LinuxTaskController.java deleted file mode 100644 index b3e63ae6e6..0000000000 --- a/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/LinuxTaskController.java +++ /dev/null @@ -1,657 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import java.io.BufferedWriter; -import java.io.File; -import java.io.FileWriter; -import java.io.IOException; -import java.io.PrintWriter; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.LocalFileSystem; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext; -import org.apache.hadoop.mapred.JvmManager.JvmEnv; -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Shell.ExitCodeException; -import org.apache.hadoop.util.Shell.ShellCommandExecutor; - -/** - * A {@link TaskController} that runs the task JVMs as the user - * who submits the job. - * - * This class executes a setuid executable to implement methods - * of the {@link TaskController}, including launching the task - * JVM and killing it when needed, and also initializing and - * finalizing the task environment. - *
The setuid executable is launched using the command line: - * task-controller mapreduce.job.user.name command command-args, where - * mapreduce.job.user.name is the name of the owner who submits the job - * command is one of the cardinal value of the - * {@link LinuxTaskController.TaskControllerCommands} enumeration - * command-args depends on the command being launched.
    - * - * In addition to running and killing tasks, the class also - * sets up appropriate access for the directories and files - * that will be used by the tasks. - */ -class LinuxTaskController extends TaskController { - - private static final Log LOG = - LogFactory.getLog(LinuxTaskController.class); - - // Name of the executable script that will contain the child - // JVM command line. See writeCommand for details. - private static final String COMMAND_FILE = "taskjvm.sh"; - - // Path to the setuid executable. - private static String taskControllerExe; - - static { - // the task-controller is expected to be under the $HADOOP_PREFIX/bin - // directory. - File hadoopBin = new File(System.getenv("HADOOP_PREFIX"), "bin"); - taskControllerExe = - new File(hadoopBin, "task-controller").getAbsolutePath(); - } - - public LinuxTaskController() { - super(); - } - - /** - * List of commands that the setuid script will execute. - */ - enum TaskControllerCommands { - INITIALIZE_USER, - INITIALIZE_JOB, - INITIALIZE_DISTRIBUTEDCACHE_FILE, - LAUNCH_TASK_JVM, - INITIALIZE_TASK, - TERMINATE_TASK_JVM, - KILL_TASK_JVM, - RUN_DEBUG_SCRIPT, - SIGQUIT_TASK_JVM, - ENABLE_TASK_FOR_CLEANUP, - ENABLE_JOB_FOR_CLEANUP - } - - @Override - public void setup() throws IOException { - super.setup(); - - // Check the permissions of the task-controller binary by running it plainly. - // If permissions are correct, it returns an error code 1, else it returns - // 24 or something else if some other bugs are also present. - String[] taskControllerCmd = - new String[] { getTaskControllerExecutablePath() }; - ShellCommandExecutor shExec = new ShellCommandExecutor(taskControllerCmd); - try { - shExec.execute(); - } catch (ExitCodeException e) { - int exitCode = shExec.getExitCode(); - if (exitCode != 1) { - LOG.warn("Exit code from checking binary permissions is : " + exitCode); - logOutput(shExec.getOutput()); - throw new IOException("Task controller setup failed because of invalid" - + "permissions/ownership with exit code " + exitCode, e); - } - } - } - - /** - * Launch a task JVM that will run as the owner of the job. - * - * This method launches a task JVM by executing a setuid executable that will - * switch to the user and run the task. Also does initialization of the first - * task in the same setuid process launch. - */ - @Override - void launchTaskJVM(TaskController.TaskControllerContext context) - throws IOException { - JvmEnv env = context.env; - // get the JVM command line. - String cmdLine = - TaskLog.buildCommandLine(env.setup, env.vargs, env.stdout, env.stderr, - env.logSize, true); - - StringBuffer sb = new StringBuffer(); - //export out all the environment variable before child command as - //the setuid/setgid binaries would not be getting, any environmental - //variables which begin with LD_*. - for(Entry entry : env.env.entrySet()) { - sb.append("export "); - sb.append(entry.getKey()); - sb.append("="); - sb.append(entry.getValue()); - sb.append("\n"); - } - sb.append(cmdLine); - // write the command to a file in the - // task specific cache directory - writeCommand(sb.toString(), getTaskCacheDirectory(context, - context.env.workDir)); - - // Call the taskcontroller with the right parameters. 
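To make the hand-off described in the comment above concrete: the class javadoc and buildTaskControllerExecutor() further down lay out the argument vector as executable path, job-owner user name, command ordinal, then command-specific arguments. The sketch below only restates that layout; the class name, the truncated enum and the sample paths/ids are illustrative assumptions, and the real TaskControllerCommands enum defines more values, so the ordinals differ.

// Minimal sketch: assemble the task-controller argument vector the way
// buildTaskControllerExecutor() does -- binary path, job-owner user name,
// command ordinal, then command-specific arguments. Class/enum names and
// the sample values are illustrative only.
import java.util.Arrays;
import java.util.List;

class TaskControllerCmdLineSketch {

    // Truncated stand-in for LinuxTaskController.TaskControllerCommands;
    // the real enum defines more commands, so ordinals differ.
    enum Cmd { INITIALIZE_JOB, LAUNCH_TASK_JVM, KILL_TASK_JVM }

    static String[] build(String exe, String user, Cmd cmd, List<String> args) {
        String[] cmdLine = new String[3 + args.size()];
        cmdLine[0] = exe;                             // setuid executable
        cmdLine[1] = user;                            // mapreduce.job.user.name
        cmdLine[2] = String.valueOf(cmd.ordinal());   // command encoded as its ordinal
        for (int i = 0; i < args.size(); i++) {
            cmdLine[3 + i] = args.get(i);             // command-specific arguments
        }
        return cmdLine;
    }

    public static void main(String[] unused) {
        String[] cmdLine = build("/usr/local/hadoop/bin/task-controller",
            "alice", Cmd.LAUNCH_TASK_JVM,
            Arrays.asList("/local/mapred", "job_0001", "attempt_0001_m_000000_0"));
        System.out.println(String.join(" ", cmdLine));
    }
}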
- List launchTaskJVMArgs = buildLaunchTaskArgs(context, - context.env.workDir); - ShellCommandExecutor shExec = buildTaskControllerExecutor( - TaskControllerCommands.LAUNCH_TASK_JVM, - env.conf.getUser(), - launchTaskJVMArgs, env.workDir, env.env); - context.shExec = shExec; - try { - shExec.execute(); - } catch (Exception e) { - int exitCode = shExec.getExitCode(); - LOG.warn("Exit code from task is : " + exitCode); - // 143 (SIGTERM) and 137 (SIGKILL) exit codes means the task was - // terminated/killed forcefully. In all other cases, log the - // task-controller output - if (exitCode != 143 && exitCode != 137) { - LOG.warn("Exception thrown while launching task JVM : " - + StringUtils.stringifyException(e)); - LOG.info("Output from LinuxTaskController's launchTaskJVM follows:"); - logOutput(shExec.getOutput()); - } - throw new IOException(e); - } - if (LOG.isDebugEnabled()) { - LOG.info("Output from LinuxTaskController's launchTaskJVM follows:"); - logOutput(shExec.getOutput()); - } - } - - /** - * Launch the debug script process that will run as the owner of the job. - * - * This method launches the task debug script process by executing a setuid - * executable that will switch to the user and run the task. - */ - @Override - void runDebugScript(DebugScriptContext context) throws IOException { - String debugOut = FileUtil.makeShellPath(context.stdout); - String cmdLine = TaskLog.buildDebugScriptCommandLine(context.args, debugOut); - writeCommand(cmdLine, getTaskCacheDirectory(context, context.workDir)); - // Call the taskcontroller with the right parameters. - List launchTaskJVMArgs = buildLaunchTaskArgs(context, context.workDir); - runCommand(TaskControllerCommands.RUN_DEBUG_SCRIPT, context.task.getUser(), - launchTaskJVMArgs, context.workDir, null); - } - /** - * Helper method that runs a LinuxTaskController command - * - * @param taskControllerCommand - * @param user - * @param cmdArgs - * @param env - * @throws IOException - */ - private void runCommand(TaskControllerCommands taskControllerCommand, - String user, List cmdArgs, File workDir, Map env) - throws IOException { - - ShellCommandExecutor shExec = - buildTaskControllerExecutor(taskControllerCommand, user, cmdArgs, - workDir, env); - try { - shExec.execute(); - } catch (Exception e) { - LOG.warn("Exit code from " + taskControllerCommand.toString() + " is : " - + shExec.getExitCode()); - LOG.warn("Exception thrown by " + taskControllerCommand.toString() + " : " - + StringUtils.stringifyException(e)); - LOG.info("Output from LinuxTaskController's " - + taskControllerCommand.toString() + " follows:"); - logOutput(shExec.getOutput()); - throw new IOException(e); - } - if (LOG.isDebugEnabled()) { - LOG.info("Output from LinuxTaskController's " - + taskControllerCommand.toString() + " follows:"); - logOutput(shExec.getOutput()); - } - } - - /** - * Returns list of arguments to be passed while initializing a new task. See - * {@code buildTaskControllerExecutor(TaskControllerCommands, String, - * List, JvmEnv)} documentation. 
- * - * @param context - * @return Argument to be used while launching Task VM - */ - private List buildInitializeTaskArgs(TaskExecContext context) { - List commandArgs = new ArrayList(3); - String taskId = context.task.getTaskID().toString(); - String jobId = getJobId(context); - commandArgs.add(jobId); - if (!context.task.isTaskCleanupTask()) { - commandArgs.add(taskId); - } else { - commandArgs.add(taskId + TaskTracker.TASK_CLEANUP_SUFFIX); - } - return commandArgs; - } - - @Override - void initializeTask(TaskControllerContext context) - throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("Going to do " - + TaskControllerCommands.INITIALIZE_TASK.toString() - + " for " + context.task.getTaskID().toString()); - } - runCommand(TaskControllerCommands.INITIALIZE_TASK, - context.env.conf.getUser(), - buildInitializeTaskArgs(context), context.env.workDir, context.env.env); - } - - /** - * Builds the args to be passed to task-controller for enabling of task for - * cleanup. Last arg in this List is either $attemptId or $attemptId/work - */ - private List buildTaskCleanupArgs( - TaskControllerTaskPathDeletionContext context) { - List commandArgs = new ArrayList(3); - commandArgs.add(context.mapredLocalDir.toUri().getPath()); - commandArgs.add(context.task.getJobID().toString()); - - String workDir = ""; - if (context.isWorkDir) { - workDir = "/work"; - } - if (context.task.isTaskCleanupTask()) { - commandArgs.add(context.task.getTaskID() + TaskTracker.TASK_CLEANUP_SUFFIX - + workDir); - } else { - commandArgs.add(context.task.getTaskID() + workDir); - } - - return commandArgs; - } - - /** - * Builds the args to be passed to task-controller for enabling of job for - * cleanup. Last arg in this List is $jobid. - */ - private List buildJobCleanupArgs( - TaskControllerJobPathDeletionContext context) { - List commandArgs = new ArrayList(2); - commandArgs.add(context.mapredLocalDir.toUri().getPath()); - commandArgs.add(context.jobId.toString()); - - return commandArgs; - } - - /** - * Enables the task for cleanup by changing permissions of the specified path - * in the local filesystem - */ - @Override - void enableTaskForCleanup(PathDeletionContext context) - throws IOException { - if (context instanceof TaskControllerTaskPathDeletionContext) { - TaskControllerTaskPathDeletionContext tContext = - (TaskControllerTaskPathDeletionContext) context; - enablePathForCleanup(tContext, - TaskControllerCommands.ENABLE_TASK_FOR_CLEANUP, - buildTaskCleanupArgs(tContext)); - } - else { - throw new IllegalArgumentException("PathDeletionContext provided is not " - + "TaskControllerTaskPathDeletionContext."); - } - } - - /** - * Enables the job for cleanup by changing permissions of the specified path - * in the local filesystem - */ - @Override - void enableJobForCleanup(PathDeletionContext context) - throws IOException { - if (context instanceof TaskControllerJobPathDeletionContext) { - TaskControllerJobPathDeletionContext tContext = - (TaskControllerJobPathDeletionContext) context; - enablePathForCleanup(tContext, - TaskControllerCommands.ENABLE_JOB_FOR_CLEANUP, - buildJobCleanupArgs(tContext)); - } else { - throw new IllegalArgumentException("PathDeletionContext provided is not " - + "TaskControllerJobPathDeletionContext."); - } - } - - /** - * Enable a path for cleanup - * @param c {@link TaskControllerPathDeletionContext} for the path to be - * cleaned up - * @param command {@link TaskControllerCommands} for task/job cleanup - * @param cleanupArgs arguments for the {@link LinuxTaskController} to 
enable - * path cleanup - */ - private void enablePathForCleanup(TaskControllerPathDeletionContext c, - TaskControllerCommands command, - List cleanupArgs) { - if (LOG.isDebugEnabled()) { - LOG.debug("Going to do " + command.toString() + " for " + c.fullPath); - } - - if ( c.user != null && c.fs instanceof LocalFileSystem) { - try { - runCommand(command, c.user, cleanupArgs, null, null); - } catch(IOException e) { - LOG.warn("Unable to change permissions for " + c.fullPath); - } - } - else { - throw new IllegalArgumentException("Either user is null or the " - + "file system is not local file system."); - } - } - - private void logOutput(String output) { - String shExecOutput = output; - if (shExecOutput != null) { - for (String str : shExecOutput.split("\n")) { - LOG.info(str); - } - } - } - - private String getJobId(TaskExecContext context) { - String taskId = context.task.getTaskID().toString(); - TaskAttemptID tId = TaskAttemptID.forName(taskId); - String jobId = tId.getJobID().toString(); - return jobId; - } - - /** - * Returns list of arguments to be passed while launching task VM. - * See {@code buildTaskControllerExecutor(TaskControllerCommands, - * String, List, JvmEnv)} documentation. - * @param context - * @return Argument to be used while launching Task VM - */ - private List buildLaunchTaskArgs(TaskExecContext context, - File workDir) { - List commandArgs = new ArrayList(3); - LOG.debug("getting the task directory as: " - + getTaskCacheDirectory(context, workDir)); - LOG.debug("getting the tt_root as " +getDirectoryChosenForTask( - new File(getTaskCacheDirectory(context, workDir)), - context) ); - commandArgs.add(getDirectoryChosenForTask( - new File(getTaskCacheDirectory(context, workDir)), - context)); - commandArgs.addAll(buildInitializeTaskArgs(context)); - return commandArgs; - } - - // Get the directory from the list of directories configured - // in Configs.LOCAL_DIR chosen for storing data pertaining to - // this task. - private String getDirectoryChosenForTask(File directory, - TaskExecContext context) { - String jobId = getJobId(context); - String taskId = context.task.getTaskID().toString(); - for (String dir : mapredLocalDirs) { - File mapredDir = new File(dir); - File taskDir = - new File(mapredDir, TaskTracker.getTaskWorkDir(context.task - .getUser(), jobId, taskId, context.task.isTaskCleanupTask())) - .getParentFile(); - if (directory.equals(taskDir)) { - return dir; - } - } - - LOG.error("Couldn't parse task cache directory correctly"); - throw new IllegalArgumentException("invalid task cache directory " - + directory.getAbsolutePath()); - } - - /** - * Builds the command line for launching/terminating/killing task JVM. - * Following is the format for launching/terminating/killing task JVM - *
- * For launching following is command line argument: - * {@code mapreduce.job.user.name command tt-root job_id task_id} - *
    - * For terminating/killing task jvm. - * {@code mapreduce.job.user.name command tt-root task-pid} - * - * @param command command to be executed. - * @param userName mapreduce.job.user.name - * @param cmdArgs list of extra arguments - * @param workDir working directory for the task-controller - * @param env JVM environment variables. - * @return {@link ShellCommandExecutor} - * @throws IOException - */ - private ShellCommandExecutor buildTaskControllerExecutor( - TaskControllerCommands command, String userName, List cmdArgs, - File workDir, Map env) - throws IOException { - String[] taskControllerCmd = new String[3 + cmdArgs.size()]; - taskControllerCmd[0] = getTaskControllerExecutablePath(); - taskControllerCmd[1] = userName; - taskControllerCmd[2] = String.valueOf(command.ordinal()); - int i = 3; - for (String cmdArg : cmdArgs) { - taskControllerCmd[i++] = cmdArg; - } - if (LOG.isDebugEnabled()) { - for (String cmd : taskControllerCmd) { - LOG.debug("taskctrl command = " + cmd); - } - } - ShellCommandExecutor shExec = null; - if(workDir != null && workDir.exists()) { - shExec = new ShellCommandExecutor(taskControllerCmd, - workDir, env); - } else { - shExec = new ShellCommandExecutor(taskControllerCmd); - } - - return shExec; - } - - // Return the task specific directory under the cache. - private String getTaskCacheDirectory(TaskExecContext context, - File workDir) { - // In the case of JVM reuse, the task specific directory - // is different from what is set with respect with - // env.workDir. Hence building this from the taskId everytime. - String taskId = context.task.getTaskID().toString(); - File cacheDirForJob = workDir.getParentFile().getParentFile(); - if(context.task.isTaskCleanupTask()) { - taskId = taskId + TaskTracker.TASK_CLEANUP_SUFFIX; - } - return new File(cacheDirForJob, taskId).getAbsolutePath(); - } - - // Write the JVM command line to a file under the specified directory - // Note that the JVM will be launched using a setuid executable, and - // could potentially contain strings defined by a user. Hence, to - // prevent special character attacks, we write the command line to - // a file and execute it. - private void writeCommand(String cmdLine, - String directory) throws IOException { - - PrintWriter pw = null; - String commandFile = directory + File.separator + COMMAND_FILE; - LOG.info("Writing commands to " + commandFile); - LOG.info("--------Commands Begin--------"); - LOG.info(cmdLine); - LOG.info("--------Commands End--------"); - try { - FileWriter fw = new FileWriter(commandFile); - BufferedWriter bw = new BufferedWriter(fw); - pw = new PrintWriter(bw); - pw.write(cmdLine); - } catch (IOException ioe) { - LOG.error("Caught IOException while writing JVM command line to file. " - + ioe.getMessage()); - } finally { - if (pw != null) { - pw.close(); - } - // set execute permissions for all on the file. 
- File f = new File(commandFile); - if (f.exists()) { - f.setReadable(true, false); - f.setExecutable(true, false); - } - } - } - - private List buildInitializeJobCommandArgs( - JobInitializationContext context) { - List initJobCmdArgs = new ArrayList(); - initJobCmdArgs.add(context.jobid.toString()); - return initJobCmdArgs; - } - - @Override - void initializeJob(JobInitializationContext context) - throws IOException { - LOG.debug("Going to initialize job " + context.jobid.toString() - + " on the TT"); - runCommand(TaskControllerCommands.INITIALIZE_JOB, context.user, - buildInitializeJobCommandArgs(context), context.workDir, null); - } - - @Override - public void initializeDistributedCacheFile(DistributedCacheFileContext context) - throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("Going to initialize distributed cache for " + context.user - + " with localizedBaseDir " + context.localizedBaseDir + - " and uniqueString " + context.uniqueString); - } - List args = new ArrayList(); - // Here, uniqueString might start with '-'. Adding -- in front of the - // arguments indicates that they are non-option parameters. - args.add("--"); - args.add(context.localizedBaseDir.toString()); - args.add(context.uniqueString); - runCommand(TaskControllerCommands.INITIALIZE_DISTRIBUTEDCACHE_FILE, - context.user, args, context.workDir, null); - } - - @Override - public void initializeUser(InitializationContext context) - throws IOException { - LOG.debug("Going to initialize user directories for " + context.user - + " on the TT"); - runCommand(TaskControllerCommands.INITIALIZE_USER, context.user, - new ArrayList(), context.workDir, null); - } - - /** - * API which builds the command line to be pass to LinuxTaskController - * binary to terminate/kill the task. See - * {@code buildTaskControllerExecutor(TaskControllerCommands, - * String, List, JvmEnv)} documentation. - * - * - * @param context context of task which has to be passed kill signal. 
- * - */ - private List buildKillTaskCommandArgs(TaskControllerContext - context){ - List killTaskJVMArgs = new ArrayList(); - killTaskJVMArgs.add(context.pid); - return killTaskJVMArgs; - } - - /** - * Convenience method used to sending appropriate signal to the task - * VM - * @param context - * @param command - * @throws IOException - */ - protected void signalTask(TaskControllerContext context, - TaskControllerCommands command) throws IOException{ - if(context.task == null) { - LOG.info("Context task is null; not signaling the JVM"); - return; - } - ShellCommandExecutor shExec = buildTaskControllerExecutor( - command, context.env.conf.getUser(), - buildKillTaskCommandArgs(context), context.env.workDir, - context.env.env); - try { - shExec.execute(); - } catch (Exception e) { - LOG.warn("Output from task-contoller is : " + shExec.getOutput()); - throw new IOException(e); - } - } - - @Override - void terminateTask(TaskControllerContext context) { - try { - signalTask(context, TaskControllerCommands.TERMINATE_TASK_JVM); - } catch (Exception e) { - LOG.warn("Exception thrown while sending kill to the Task VM " + - StringUtils.stringifyException(e)); - } - } - - @Override - void killTask(TaskControllerContext context) { - try { - signalTask(context, TaskControllerCommands.KILL_TASK_JVM); - } catch (Exception e) { - LOG.warn("Exception thrown while sending destroy to the Task VM " + - StringUtils.stringifyException(e)); - } - } - - @Override - void dumpTaskStack(TaskControllerContext context) { - try { - signalTask(context, TaskControllerCommands.SIGQUIT_TASK_JVM); - } catch (Exception e) { - LOG.warn("Exception thrown while sending SIGQUIT to the Task VM " + - StringUtils.stringifyException(e)); - } - } - - protected String getTaskControllerExecutablePath() { - return taskControllerExe; - } - - @Override - String getRunAsUser(JobConf conf) { - return conf.getUser(); - } -} \ No newline at end of file diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java deleted file mode 100644 index 75e25522f9..0000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java +++ /dev/null @@ -1,511 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.mapred; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.PrintWriter; -import java.util.List; -import java.util.ArrayList; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.mapreduce.filecache.TestTrackerDistributedCacheManager; -import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.StringUtils; - -import junit.framework.TestCase; - -/** - * The base class which starts up a cluster with LinuxTaskController as the task - * controller. - * - * In order to run test cases utilizing LinuxTaskController please follow the - * following steps: - *
- * 1. Build LinuxTaskController by not passing any - * -Dhadoop.conf.dir - * 2. Change ownership of the built binary to root:group1, where group1 is - * a secondary group of the test runner. - * 3. Change permissions on the binary so that others component does - * not have any permissions on binary - * 4. Make the built binary to setuid and setgid executable - * 5. Execute following targets: - * ant test -Dcompile.c++=true -Dtaskcontroller-path=path to built binary - * -Dtaskcontroller-ugi=user,group - * (Note that "path to built binary" means the directory containing task-controller - - * not the actual complete path of the binary itself. This path must end in ".../bin") - *
    - * - */ -public class ClusterWithLinuxTaskController extends TestCase { - private static final Log LOG = - LogFactory.getLog(ClusterWithLinuxTaskController.class); - - /** - * The wrapper class around LinuxTaskController which allows modification of - * the custom path to task-controller which we can use for task management. - * - **/ - public static class MyLinuxTaskController extends LinuxTaskController { - String taskControllerExePath = System.getProperty(TASKCONTROLLER_PATH) - + "/task-controller"; - - @Override - public void setup() throws IOException { - getConf().set(TTConfig.TT_GROUP, taskTrackerSpecialGroup); - - // write configuration file - configurationFile = createTaskControllerConf(System - .getProperty(TASKCONTROLLER_PATH), getConf()); - super.setup(); - } - - @Override - protected String getTaskControllerExecutablePath() { - return new File(taskControllerExePath).getAbsolutePath(); - } - - void setTaskControllerExe(String execPath) { - this.taskControllerExePath = execPath; - } - - volatile static int attemptedSigQuits = 0; - volatile static int failedSigQuits = 0; - - /** Work like LinuxTaskController, but also count the number of - * attempted and failed SIGQUIT sends via the task-controller - * executable. - */ - @Override - void dumpTaskStack(TaskControllerContext context) { - attemptedSigQuits++; - try { - signalTask(context, TaskControllerCommands.SIGQUIT_TASK_JVM); - } catch (Exception e) { - LOG.warn("Execution sending SIGQUIT: " + StringUtils.stringifyException(e)); - failedSigQuits++; - } - } - } - - // cluster instances which sub classes can use - protected MiniMRCluster mrCluster = null; - protected MiniDFSCluster dfsCluster = null; - - private JobConf clusterConf = null; - protected Path homeDirectory; - - /** changing this to a larger number needs more work for creating - * taskcontroller.cfg. - * see {@link #startCluster()} and - * {@link #createTaskControllerConf(String, Configuration)} - */ - private static final int NUMBER_OF_NODES = 1; - - static final String TASKCONTROLLER_PATH = "taskcontroller-path"; - static final String TASKCONTROLLER_UGI = "taskcontroller-ugi"; - - private static File configurationFile = null; - - protected UserGroupInformation jobOwner; - - protected static String taskTrackerSpecialGroup = null; - /** - * Primary group of the tasktracker - i.e. the user running the - * test. - */ - protected static String taskTrackerPrimaryGroup = null; - static { - if (isTaskExecPathPassed()) { - try { - taskTrackerSpecialGroup = FileSystem.getLocal(new Configuration()) - .getFileStatus( - new Path(System.getProperty(TASKCONTROLLER_PATH), - "task-controller")).getGroup(); - } catch (IOException e) { - LOG.warn("Could not get group of the binary", e); - fail("Could not get group of the binary"); - } - try { - taskTrackerPrimaryGroup = - UserGroupInformation.getCurrentUser().getGroupNames()[0]; - } catch (IOException ioe) { - LOG.warn("Could not get primary group of the current user", ioe); - fail("Could not get primary group of the current user"); - } - } - } - - /* - * Utility method which subclasses use to start and configure the MR Cluster - * so they can directly submit a job. 
- */ - protected void startCluster() - throws IOException, InterruptedException { - JobConf conf = new JobConf(); - dfsCluster = new MiniDFSCluster(conf, NUMBER_OF_NODES, true, null); - conf.set(TTConfig.TT_TASK_CONTROLLER, - MyLinuxTaskController.class.getName()); - conf.setBoolean(JTConfig.JT_PERSIST_JOBSTATUS, false); - mrCluster = - new MiniMRCluster(NUMBER_OF_NODES, dfsCluster.getFileSystem().getUri() - .toString(), 4, null, null, conf); - - clusterConf = mrCluster.createJobConf(); - - String ugi = System.getProperty(TASKCONTROLLER_UGI); - String[] splits = ugi.split(","); - jobOwner = UserGroupInformation.createUserForTesting(splits[0], - new String[]{splits[1]}); - createHomeAndStagingDirectory(clusterConf); - } - - private void createHomeAndStagingDirectory(JobConf conf) - throws IOException { - FileSystem fs = dfsCluster.getFileSystem(); - String path = "/user/" + jobOwner.getUserName(); - homeDirectory = new Path(path); - LOG.info("Creating Home directory : " + homeDirectory); - fs.mkdirs(homeDirectory); - changePermission(fs); - Path stagingArea = new Path(conf.get(JTConfig.JT_STAGING_AREA_ROOT)); - LOG.info("Creating Staging root directory : " + stagingArea); - fs.mkdirs(stagingArea); - fs.setPermission(stagingArea, new FsPermission((short)0777)); - } - - private void changePermission(FileSystem fs) - throws IOException { - fs.setOwner(homeDirectory, jobOwner.getUserName(), - jobOwner.getGroupNames()[0]); - } - - static File getTaskControllerConfFile(String path) { - File confDirectory = new File(path, "../conf"); - return new File(confDirectory, "taskcontroller.cfg"); - } - - /** - * Create taskcontroller.cfg. - * - * @param path Path to the taskcontroller binary. - * @param conf TaskTracker's configuration - * @return the created conf file - * @throws IOException - */ - static File createTaskControllerConf(String path, - Configuration conf) throws IOException { - File confDirectory = new File(path, "../conf"); - if (!confDirectory.exists()) { - confDirectory.mkdirs(); - } - File configurationFile = new File(confDirectory, "taskcontroller.cfg"); - PrintWriter writer = - new PrintWriter(new FileOutputStream(configurationFile)); - - writer.println(String.format(MRConfig.LOCAL_DIR + "=%s", conf - .get(MRConfig.LOCAL_DIR))); - - writer - .println(String.format("hadoop.log.dir=%s", TaskLog.getBaseLogDir())); - writer.println(String.format(TTConfig.TT_GROUP + "=%s", - conf.get(TTConfig.TT_GROUP))); - - writer.flush(); - writer.close(); - return configurationFile; - } - - /** - * Can we run the tests with LinuxTaskController? 
- * - * @return boolean - */ - protected static boolean shouldRun() { - if (!isTaskExecPathPassed() || !isUserPassed()) { - LOG.info("Not running test."); - return false; - } - return true; - } - - static boolean isTaskExecPathPassed() { - String path = System.getProperty(TASKCONTROLLER_PATH); - if (path == null || path.isEmpty() - || path.equals("${" + TASKCONTROLLER_PATH + "}")) { - LOG.info("Invalid taskcontroller-path : " + path); - return false; - } - return true; - } - - private static boolean isUserPassed() { - String ugi = System.getProperty(TASKCONTROLLER_UGI); - if (ugi != null && !(ugi.equals("${" + TASKCONTROLLER_UGI + "}")) - && !ugi.isEmpty()) { - if (ugi.indexOf(",") > 1) { - return true; - } - LOG.info("Invalid taskcontroller-ugi : " + ugi); - return false; - } - LOG.info("Invalid taskcontroller-ugi : " + ugi); - return false; - } - - protected JobConf getClusterConf() { - return new JobConf(clusterConf); - } - - @Override - protected void tearDown() - throws Exception { - if (mrCluster != null) { - mrCluster.shutdown(); - } - - if (dfsCluster != null) { - dfsCluster.shutdown(); - } - - if (configurationFile != null) { - configurationFile.delete(); - } - - super.tearDown(); - } - - /** - * Assert that the job is actually run by the specified user by verifying the - * permissions of the output part-files. - * - * @param outDir - * @throws IOException - */ - protected void assertOwnerShip(Path outDir) - throws IOException { - FileSystem fs = outDir.getFileSystem(clusterConf); - assertOwnerShip(outDir, fs); - } - - /** - * Assert that the job is actually run by the specified user by verifying the - * permissions of the output part-files. - * - * @param outDir - * @param fs - * @throws IOException - */ - protected void assertOwnerShip(Path outDir, FileSystem fs) - throws IOException { - for (FileStatus status : fs.listStatus(outDir, - new Utils.OutputFileUtils - .OutputFilesFilter())) { - String owner = status.getOwner(); - String group = status.getGroup(); - LOG.info("Ownership of the file is " + status.getPath() + " is " + owner - + "," + group); - assertTrue("Output part-file's owner is not correct. Expected : " - + jobOwner.getUserName() + " Found : " + owner, owner - .equals(jobOwner.getUserName())); - assertTrue("Output part-file's group is not correct. Expected : " - + jobOwner.getGroupNames()[0] + " Found : " + group, group - .equals(jobOwner.getGroupNames()[0])); - } - } - - /** - * Validates permissions of private distcache dir and its contents fully - */ - public static void checkPermissionsOnPrivateDistCache(String[] localDirs, - String user, String taskTrackerUser, String groupOwner) - throws IOException { - // user-dir, jobcache and distcache will have - // 2770 permissions if jobOwner is same as tt_user - // 2570 permissions for any other user - String expectedDirPerms = taskTrackerUser.equals(user) - ? "drwxrws---" - : "dr-xrws---"; - String expectedFilePerms = taskTrackerUser.equals(user) - ? "-rwxrwx---" - : "-r-xrwx---"; - for (String localDir : localDirs) { - File distCacheDir = new File(localDir, - TaskTracker.getPrivateDistributedCacheDir(user)); - if (distCacheDir.exists()) { - checkPermissionsOnDir(distCacheDir, user, groupOwner, expectedDirPerms, - expectedFilePerms); - } - } - } - - /** - * Check that files expected to be localized in distributed cache for a user - * are present. - * @param localDirs List of mapred local directories. 
- * @param user User against which localization is happening - * @param expectedFileNames List of files expected to be localized - * @throws IOException - */ - public static void checkPresenceOfPrivateDistCacheFiles(String[] localDirs, - String user, String[] expectedFileNames) throws IOException { - FileGatherer gatherer = new FileGatherer(); - for (String localDir : localDirs) { - File distCacheDir = new File(localDir, - TaskTracker.getPrivateDistributedCacheDir(user)); - findExpectedFiles(expectedFileNames, distCacheDir, gatherer); - } - assertEquals("Files expected in private distributed cache were not found", - expectedFileNames.length, gatherer.getCount()); - } - - /** - * Validates permissions and ownership of public distcache dir and its - * contents fully in all local dirs - */ - public static void checkPermissionsOnPublicDistCache(FileSystem localFS, - String[] localDirs, String owner, String group) throws IOException { - for (String localDir : localDirs) { - File distCacheDir = new File(localDir, - TaskTracker.getPublicDistributedCacheDir()); - - if (distCacheDir.exists()) { - checkPublicFilePermissions(localFS, distCacheDir, owner, group); - } - } - } - - /** - * Checks that files expected to be localized in the public distributed - * cache are present - * @param localDirs List of mapred local directories - * @param expectedFileNames List of expected file names. - * @throws IOException - */ - public static void checkPresenceOfPublicDistCacheFiles(String[] localDirs, - String[] expectedFileNames) throws IOException { - FileGatherer gatherer = new FileGatherer(); - for (String localDir : localDirs) { - File distCacheDir = new File(localDir, - TaskTracker.getPublicDistributedCacheDir()); - findExpectedFiles(expectedFileNames, distCacheDir, gatherer); - } - assertEquals("Files expected in public distributed cache were not found", - expectedFileNames.length, gatherer.getCount()); - } - - /** - * Validates permissions and ownership on the public distributed cache files - */ - private static void checkPublicFilePermissions(FileSystem localFS, File dir, - String owner, String group) - throws IOException { - Path dirPath = new Path(dir.getAbsolutePath()); - TestTrackerDistributedCacheManager.checkPublicFilePermissions(localFS, - new Path[] {dirPath}); - TestTrackerDistributedCacheManager.checkPublicFileOwnership(localFS, - new Path[] {dirPath}, owner, group); - if (dir.isDirectory()) { - File[] files = dir.listFiles(); - for (File file : files) { - checkPublicFilePermissions(localFS, file, owner, group); - } - } - } - - /** - * Validates permissions of given dir and its contents fully(i.e. recursively) - */ - private static void checkPermissionsOnDir(File dir, String user, - String groupOwner, String expectedDirPermissions, - String expectedFilePermissions) throws IOException { - TestTaskTrackerLocalization.checkFilePermissions(dir.toString(), - expectedDirPermissions, user, groupOwner); - File[] files = dir.listFiles(); - for (File file : files) { - if (file.isDirectory()) { - checkPermissionsOnDir(file, user, groupOwner, expectedDirPermissions, - expectedFilePermissions); - } else { - TestTaskTrackerLocalization.checkFilePermissions(file.toString(), - expectedFilePermissions, user, groupOwner); - } - } - } - - // Check which files among those expected are present in the rootDir - // Add those present to the FileGatherer. 
- private static void findExpectedFiles(String[] expectedFileNames, - File rootDir, FileGatherer gatherer) { - - File[] files = rootDir.listFiles(); - if (files == null) { - return; - } - for (File file : files) { - if (file.isDirectory()) { - findExpectedFiles(expectedFileNames, file, gatherer); - } else { - if (isFilePresent(expectedFileNames, file)) { - gatherer.addFileName(file.getName()); - } - } - } - - } - - // Test if the passed file is present in the expected list of files. - private static boolean isFilePresent(String[] expectedFileNames, File file) { - boolean foundFileName = false; - for (String name : expectedFileNames) { - if (name.equals(file.getName())) { - foundFileName = true; - break; - } - } - return foundFileName; - } - - // Helper class to collect a list of file names across multiple - // method calls. Wrapper around a collection defined for clarity - private static class FileGatherer { - List foundFileNames = new ArrayList(); - - void addFileName(String fileName) { - foundFileNames.add(fileName); - } - - int getCount() { - return foundFileNames.size(); - } - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestDebugScriptWithLinuxTaskController.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestDebugScriptWithLinuxTaskController.java deleted file mode 100644 index 7a274a4ff1..0000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestDebugScriptWithLinuxTaskController.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.IOException; -import java.security.PrivilegedExceptionAction; - -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.security.UserGroupInformation; -import org.junit.Test; - -public class TestDebugScriptWithLinuxTaskController extends - ClusterWithLinuxTaskController { - - @Test - public void testDebugScriptExecutionAsDifferentUser() throws Exception { - if (!super.shouldRun()) { - return; - } - super.startCluster(); - TestDebugScript.setupDebugScriptDirs(); - final Path inDir = new Path("input"); - final Path outDir = new Path("output"); - JobConf conf = super.getClusterConf(); - FileSystem fs = inDir.getFileSystem(conf); - fs.mkdirs(inDir); - Path p = new Path(inDir, "1.txt"); - fs.createNewFile(p); - String splits[] = System - .getProperty(ClusterWithLinuxTaskController.TASKCONTROLLER_UGI). 
- split(","); - JobID jobId = UserGroupInformation.createUserForTesting(splits[0], - new String[]{splits[1]}).doAs(new PrivilegedExceptionAction() { - public JobID run() throws IOException{ - return TestDebugScript.runFailingMapJob( - TestDebugScriptWithLinuxTaskController.this.getClusterConf(), - inDir, outDir); - } - }); - // construct the task id of first map task of failmap - TaskAttemptID taskId = new TaskAttemptID( - new TaskID(jobId,TaskType.MAP, 0), 0); - TestDebugScript.verifyDebugScriptOutput(taskId, splits[0], - taskTrackerSpecialGroup, "-rw-rw----"); - TestDebugScript.cleanupDebugScriptDirs(); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobExecutionAsDifferentUser.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobExecutionAsDifferentUser.java deleted file mode 100644 index 8fbc883108..0000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobExecutionAsDifferentUser.java +++ /dev/null @@ -1,140 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.IOException; -import java.security.PrivilegedExceptionAction; - -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.SleepJob; -import org.apache.hadoop.util.ToolRunner; - -/** - * Test a java-based mapred job with LinuxTaskController running the jobs as a - * user different from the user running the cluster. 
See - * {@link ClusterWithLinuxTaskController} - */ -public class TestJobExecutionAsDifferentUser extends - ClusterWithLinuxTaskController { - - public void testJobExecution() - throws Exception { - if (!shouldRun()) { - return; - } - startCluster(); - - - jobOwner.doAs(new PrivilegedExceptionAction() { - public Object run() throws Exception { - Path inDir = new Path("input"); - Path outDir = new Path("output"); - - RunningJob job; - // Run a job with zero maps/reduces - job = UtilsForTests.runJob(getClusterConf(), inDir, outDir, 0, 0); - job.waitForCompletion(); - assertTrue("Job failed", job.isSuccessful()); - assertOwnerShip(outDir); - - // Run a job with 1 map and zero reduces - job = UtilsForTests.runJob(getClusterConf(), inDir, outDir, 1, 0); - job.waitForCompletion(); - assertTrue("Job failed", job.isSuccessful()); - assertOwnerShip(outDir); - - // Run a normal job with maps/reduces - job = UtilsForTests.runJob(getClusterConf(), inDir, outDir, 1, 1); - job.waitForCompletion(); - assertTrue("Job failed", job.isSuccessful()); - assertOwnerShip(outDir); - - // Run a job with jvm reuse - JobConf myConf = getClusterConf(); - myConf.set(JobContext.JVM_NUMTASKS_TORUN, "-1"); - String[] args = { "-m", "6", "-r", "3", "-mt", "1000", "-rt", "1000" }; - assertEquals(0, ToolRunner.run(myConf, new SleepJob(), args)); - return null; - } - }); - - } - - public void testEnvironment() throws Exception { - if (!shouldRun()) { - return; - } - startCluster(); - jobOwner.doAs(new PrivilegedExceptionAction() { - public Object run() throws Exception { - - TestMiniMRChildTask childTask = new TestMiniMRChildTask(); - Path inDir = new Path("input1"); - Path outDir = new Path("output1"); - try { - childTask.runTestTaskEnv(getClusterConf(), inDir, outDir, false); - } catch (IOException e) { - fail("IOException thrown while running enviroment test." - + e.getMessage()); - } finally { - FileSystem outFs = outDir.getFileSystem(getClusterConf()); - if (outFs.exists(outDir)) { - assertOwnerShip(outDir); - outFs.delete(outDir, true); - } else { - fail("Output directory does not exist" + outDir.toString()); - } - return null; - } - } - }); - } - - /** Ensure that SIGQUIT can be properly sent by the LinuxTaskController - * if a task times out. - */ - public void testTimeoutStackTrace() throws Exception { - if (!shouldRun()) { - return; - } - - // Run a job that should timeout and trigger a SIGQUIT. 
- startCluster(); - jobOwner.doAs(new PrivilegedExceptionAction() { - public Object run() throws Exception { - JobConf conf = getClusterConf(); - conf.setInt(JobContext.TASK_TIMEOUT, 10000); - conf.setInt(Job.COMPLETION_POLL_INTERVAL_KEY, 50); - SleepJob sleepJob = new SleepJob(); - sleepJob.setConf(conf); - Job job = sleepJob.createJob(1, 0, 30000, 1, 0, 0); - job.setMaxMapAttempts(1); - int prevNumSigQuits = MyLinuxTaskController.attemptedSigQuits; - job.waitForCompletion(true); - assertTrue("Did not detect a new SIGQUIT!", - prevNumSigQuits < MyLinuxTaskController.attemptedSigQuits); - assertEquals("A SIGQUIT attempt failed!", 0, - MyLinuxTaskController.failedSigQuits); - return null; - } - }); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJvmManager.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJvmManager.java index 490ee4cb45..29faa5dc0e 100644 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJvmManager.java +++ b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJvmManager.java @@ -59,7 +59,7 @@ public void setUp() { } @After - public void tearDown() throws IOException { + public void tearDown() { FileUtil.fullyDelete(TEST_DIR); } diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestKillSubProcessesWithLinuxTaskController.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestKillSubProcessesWithLinuxTaskController.java deleted file mode 100644 index ce32aa8a03..0000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestKillSubProcessesWithLinuxTaskController.java +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.security.PrivilegedExceptionAction; - -/** - * Test killing of child processes spawned by the jobs with LinuxTaskController - * running the jobs as a user different from the user running the cluster. 
- * See {@link ClusterWithLinuxTaskController} - */ - -public class TestKillSubProcessesWithLinuxTaskController extends - ClusterWithLinuxTaskController { - - public void testKillSubProcess() throws Exception{ - if(!shouldRun()) { - return; - } - startCluster(); - jobOwner.doAs(new PrivilegedExceptionAction() { - public Object run() throws Exception { - JobConf myConf = getClusterConf(); - JobTracker jt = mrCluster.getJobTrackerRunner().getJobTracker(); - - TestKillSubProcesses.mr = mrCluster; - TestKillSubProcesses sbProc = new TestKillSubProcesses(); - sbProc.runTests(myConf, jt); - return null; - } - }); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestLineRecordReader.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestLineRecordReader.java index 472da68c8a..17bf9cff25 100644 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestLineRecordReader.java +++ b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestLineRecordReader.java @@ -18,9 +18,7 @@ package org.apache.hadoop.mapred; import java.io.IOException; -import java.io.InputStreamReader; import java.io.OutputStreamWriter; -import java.io.Reader; import java.io.Writer; import junit.framework.TestCase; @@ -32,7 +30,6 @@ import org.apache.hadoop.mapred.Reducer; import org.apache.hadoop.mapred.lib.IdentityMapper; import org.apache.hadoop.mapred.lib.IdentityReducer; -import org.apache.tools.ant.util.FileUtils; import org.junit.Test; public class TestLineRecordReader extends TestCase { @@ -66,10 +63,7 @@ public void createInputFile(Configuration conf) throws IOException { public String readOutputFile(Configuration conf) throws IOException { FileSystem localFs = FileSystem.getLocal(conf); Path file = new Path(outputDir, "part-00000"); - Reader reader = new InputStreamReader(localFs.open(file)); - String r = FileUtils.readFully(reader); - reader.close(); - return r; + return UtilsForTests.slurpHadoop(file, localFs); } /** diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestLinuxTaskController.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestLinuxTaskController.java deleted file mode 100644 index b47e360128..0000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestLinuxTaskController.java +++ /dev/null @@ -1,114 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import java.io.File; -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; -import org.apache.hadoop.security.Groups; -import org.apache.hadoop.security.UserGroupInformation; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import junit.framework.TestCase; - -public class TestLinuxTaskController extends TestCase { - private static int INVALID_TASKCONTROLLER_PERMISSIONS = 24; - private static File testDir = new File(System.getProperty("test.build.data", - "/tmp"), TestLinuxTaskController.class.getName()); - private static String taskControllerPath = System - .getProperty(ClusterWithLinuxTaskController.TASKCONTROLLER_PATH); - - @Before - protected void setUp() throws Exception { - testDir.mkdirs(); - } - - @After - protected void tearDown() throws Exception { - FileUtil.fullyDelete(testDir); - } - - public static class MyLinuxTaskController extends LinuxTaskController { - String taskControllerExePath = taskControllerPath + "/task-controller"; - - @Override - protected String getTaskControllerExecutablePath() { - return taskControllerExePath; - } - } - - private void validateTaskControllerSetup(TaskController controller, - boolean shouldFail) throws IOException { - if (shouldFail) { - // task controller setup should fail validating permissions. - Throwable th = null; - try { - controller.setup(); - } catch (IOException ie) { - th = ie; - } - assertNotNull("No exception during setup", th); - assertTrue("Exception message does not contain exit code" - + INVALID_TASKCONTROLLER_PERMISSIONS, th.getMessage().contains( - "with exit code " + INVALID_TASKCONTROLLER_PERMISSIONS)); - } else { - controller.setup(); - } - - } - - @Test - public void testTaskControllerGroup() throws Exception { - if (!ClusterWithLinuxTaskController.isTaskExecPathPassed()) { - return; - } - // cleanup configuration file. - ClusterWithLinuxTaskController - .getTaskControllerConfFile(taskControllerPath).delete(); - Configuration conf = new Configuration(); - // create local dirs and set in the conf. 
- File mapredLocal = new File(testDir, "mapred/local"); - mapredLocal.mkdirs(); - conf.set(MRConfig.LOCAL_DIR, mapredLocal.toString()); - - // setup task-controller without setting any group name - TaskController controller = new MyLinuxTaskController(); - controller.setConf(conf); - validateTaskControllerSetup(controller, true); - - // set an invalid group name for the task controller group - conf.set(TTConfig.TT_GROUP, "invalid"); - // write the task-controller's conf file - ClusterWithLinuxTaskController.createTaskControllerConf(taskControllerPath, - conf); - validateTaskControllerSetup(controller, true); - - conf.set(TTConfig.TT_GROUP, - ClusterWithLinuxTaskController.taskTrackerSpecialGroup); - // write the task-controller's conf file - ClusterWithLinuxTaskController.createTaskControllerConf(taskControllerPath, - conf); - validateTaskControllerSetup(controller, false); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestLocalizationWithLinuxTaskController.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestLocalizationWithLinuxTaskController.java deleted file mode 100644 index 3f13f4db65..0000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestLocalizationWithLinuxTaskController.java +++ /dev/null @@ -1,240 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapred.ClusterWithLinuxTaskController.MyLinuxTaskController; -import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.security.UserGroupInformation; - -/** - * Test to verify localization of a job and localization of a task on a - * TaskTracker when {@link LinuxTaskController} is used. 
- * - */ -public class TestLocalizationWithLinuxTaskController extends - TestTaskTrackerLocalization { - - private static final Log LOG = - LogFactory.getLog(TestLocalizationWithLinuxTaskController.class); - - private File configFile; - private static String taskTrackerUserName; - - @Override - protected boolean canRun() { - return ClusterWithLinuxTaskController.shouldRun(); - } - - @Override - protected void setUp() - throws Exception { - - if (!canRun()) { - return; - } - - super.setUp(); - - taskTrackerUserName = UserGroupInformation.getLoginUser() - .getShortUserName(); - } - - @Override - protected void tearDown() - throws Exception { - if (!canRun()) { - return; - } - super.tearDown(); - if (configFile != null) { - configFile.delete(); - } - } - - protected TaskController createTaskController() { - return new MyLinuxTaskController(); - } - - protected UserGroupInformation getJobOwner() { - String ugi = System - .getProperty(ClusterWithLinuxTaskController.TASKCONTROLLER_UGI); - String[] splits = ugi.split(","); - return UserGroupInformation.createUserForTesting(splits[0], - new String[] { splits[1] }); - } - - /** @InheritDoc */ - @Override - public void testTaskControllerSetup() { - // Do nothing. - } - - @Override - protected void checkUserLocalization() - throws IOException { - // Check the directory structure and permissions - for (String dir : localDirs) { - - File localDir = new File(dir); - assertTrue(MRConfig.LOCAL_DIR + localDir + " isn'task created!", - localDir.exists()); - - File taskTrackerSubDir = new File(localDir, TaskTracker.SUBDIR); - assertTrue("taskTracker sub-dir in the local-dir " + localDir - + "is not created!", taskTrackerSubDir.exists()); - - // user-dir, jobcache and distcache will have - // 2770 permissions if jobOwner is same as tt_user - // 2570 permissions for any other user - String expectedDirPerms = taskTrackerUserName.equals(task.getUser()) - ? "drwxrws---" - : "dr-xrws---"; - - File userDir = new File(taskTrackerSubDir, task.getUser()); - assertTrue("user-dir in taskTrackerSubdir " + taskTrackerSubDir - + "is not created!", userDir.exists()); - - checkFilePermissions(userDir.getAbsolutePath(), expectedDirPerms, task - .getUser(), ClusterWithLinuxTaskController.taskTrackerSpecialGroup); - - File jobCache = new File(userDir, TaskTracker.JOBCACHE); - assertTrue("jobcache in the userDir " + userDir + " isn't created!", - jobCache.exists()); - - checkFilePermissions(jobCache.getAbsolutePath(), expectedDirPerms, task - .getUser(), ClusterWithLinuxTaskController.taskTrackerSpecialGroup); - - // Verify the distributed cache dir. - File distributedCacheDir = - new File(localDir, TaskTracker - .getPrivateDistributedCacheDir(task.getUser())); - assertTrue("distributed cache dir " + distributedCacheDir - + " doesn't exists!", distributedCacheDir.exists()); - checkFilePermissions(distributedCacheDir.getAbsolutePath(), - expectedDirPerms, task.getUser(), - ClusterWithLinuxTaskController.taskTrackerSpecialGroup); - } - } - - @Override - protected void checkJobLocalization() - throws IOException { - // job-dir, jars-dir and subdirectories in them will have - // 2770 permissions if jobOwner is same as tt_user - // 2570 permissions for any other user - // Files under these dirs will have - // 770 permissions if jobOwner is same as tt_user - // 570 permissions for any other user - String expectedDirPerms = taskTrackerUserName.equals(task.getUser()) - ? "drwxrws---" - : "dr-xrws---"; - String expectedFilePerms = taskTrackerUserName.equals(task.getUser()) - ? 
"-rwxrwx---" - : "-r-xrwx---"; - - for (String localDir : trackerFConf.getStrings(MRConfig.LOCAL_DIR)) { - File jobDir = - new File(localDir, TaskTracker.getLocalJobDir(task.getUser(), jobId - .toString())); - // check the private permissions on the job directory - checkFilePermissions(jobDir.getAbsolutePath(), expectedDirPerms, task - .getUser(), ClusterWithLinuxTaskController.taskTrackerSpecialGroup); - } - - // check the private permissions of various directories - List dirs = new ArrayList(); - Path jarsDir = - lDirAlloc.getLocalPathToRead(TaskTracker.getJobJarsDir(task.getUser(), - jobId.toString()), trackerFConf); - dirs.add(jarsDir); - dirs.add(new Path(jarsDir, "lib")); - for (Path dir : dirs) { - checkFilePermissions(dir.toUri().getPath(), expectedDirPerms, - task.getUser(), - ClusterWithLinuxTaskController.taskTrackerSpecialGroup); - } - - // job-work dir needs user writable permissions i.e. 2770 for any user - Path jobWorkDir = - lDirAlloc.getLocalPathToRead(TaskTracker.getJobWorkDir(task.getUser(), - jobId.toString()), trackerFConf); - checkFilePermissions(jobWorkDir.toUri().getPath(), "drwxrws---", task - .getUser(), ClusterWithLinuxTaskController.taskTrackerSpecialGroup); - - // check the private permissions of various files - List files = new ArrayList(); - files.add(lDirAlloc.getLocalPathToRead(TaskTracker.getLocalJobConfFile( - task.getUser(), jobId.toString()), trackerFConf)); - files.add(lDirAlloc.getLocalPathToRead(TaskTracker.getJobJarFile(task - .getUser(), jobId.toString()), trackerFConf)); - files.add(new Path(jarsDir, "lib" + Path.SEPARATOR + "lib1.jar")); - files.add(new Path(jarsDir, "lib" + Path.SEPARATOR + "lib2.jar")); - for (Path file : files) { - checkFilePermissions(file.toUri().getPath(), expectedFilePerms, task - .getUser(), ClusterWithLinuxTaskController.taskTrackerSpecialGroup); - } - - // check job user-log directory permissions - File jobLogDir = TaskLog.getJobDir(jobId); - checkFilePermissions(jobLogDir.toString(), expectedDirPerms, task.getUser(), - ClusterWithLinuxTaskController.taskTrackerSpecialGroup); - // check job-acls.xml file permissions - checkFilePermissions(jobLogDir.toString() + Path.SEPARATOR - + TaskTracker.jobACLsFile, expectedFilePerms, task.getUser(), - ClusterWithLinuxTaskController.taskTrackerSpecialGroup); - - // validate the content of job ACLs file - validateJobACLsFileContent(); - } - - @Override - protected void checkTaskLocalization() - throws IOException { - // check the private permissions of various directories - List dirs = new ArrayList(); - dirs.add(lDirAlloc.getLocalPathToRead(TaskTracker.getLocalTaskDir(task - .getUser(), jobId.toString(), taskId.toString(), - task.isTaskCleanupTask()), trackerFConf)); - dirs.add(attemptWorkDir); - dirs.add(new Path(attemptWorkDir, "tmp")); - dirs.add(new Path(attemptLogFiles[1].getParentFile().getAbsolutePath())); - for (Path dir : dirs) { - checkFilePermissions(dir.toUri().getPath(), "drwxrws---", - task.getUser(), - ClusterWithLinuxTaskController.taskTrackerSpecialGroup); - } - - // check the private permissions of various files - List files = new ArrayList(); - files.add(lDirAlloc.getLocalPathToRead(TaskTracker.getTaskConfFile(task - .getUser(), task.getJobID().toString(), task.getTaskID().toString(), - task.isTaskCleanupTask()), trackerFConf)); - for (Path file : files) { - checkFilePermissions(file.toUri().getPath(), "-rwxrwx---", task - .getUser(), ClusterWithLinuxTaskController.taskTrackerSpecialGroup); - } - } -} diff --git 
a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskOutputSize.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskOutputSize.java index 97e256ced1..65ae794cee 100644 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskOutputSize.java +++ b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskOutputSize.java @@ -33,7 +33,7 @@ public class TestTaskOutputSize { "/tmp"), "test"); @After - public void tearDown() throws Exception { + public void tearDown() { FileUtil.fullyDelete(new File(rootDir.toString())); } diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTrackerDistributedCacheManagerWithLinuxTaskController.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTrackerDistributedCacheManagerWithLinuxTaskController.java deleted file mode 100644 index 5ebd6ec0f2..0000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTrackerDistributedCacheManagerWithLinuxTaskController.java +++ /dev/null @@ -1,159 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.File; -import java.io.IOException; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapred.ClusterWithLinuxTaskController.MyLinuxTaskController; -import org.apache.hadoop.mapreduce.filecache.TestTrackerDistributedCacheManager; -import org.apache.hadoop.security.UserGroupInformation; - -/** - * Test the DistributedCacheManager when LinuxTaskController is used. 
- * - */ -public class TestTrackerDistributedCacheManagerWithLinuxTaskController extends - TestTrackerDistributedCacheManager { - - private File configFile; - - private static final Log LOG = - LogFactory - .getLog(TestTrackerDistributedCacheManagerWithLinuxTaskController.class); - - @Override - protected void setUp() - throws IOException, InterruptedException { - - if (!ClusterWithLinuxTaskController.shouldRun()) { - return; - } - - TEST_ROOT_DIR = - new File(System.getProperty("test.build.data", "/tmp"), - TestTrackerDistributedCacheManagerWithLinuxTaskController.class - .getSimpleName()).getAbsolutePath(); - - super.setUp(); - - taskController = new MyLinuxTaskController(); - String path = - System.getProperty(ClusterWithLinuxTaskController.TASKCONTROLLER_PATH); - String execPath = path + "/task-controller"; - ((MyLinuxTaskController)taskController).setTaskControllerExe(execPath); - taskController.setConf(conf); - taskController.setup(); - } - - @Override - protected void refreshConf(Configuration conf) throws IOException { - super.refreshConf(conf); - String path = - System.getProperty(ClusterWithLinuxTaskController.TASKCONTROLLER_PATH); - configFile = - ClusterWithLinuxTaskController.createTaskControllerConf(path, conf); - - } - - @Override - protected void tearDown() - throws IOException { - if (!ClusterWithLinuxTaskController.shouldRun()) { - return; - } - if (configFile != null) { - configFile.delete(); - } - super.tearDown(); - } - - @Override - protected boolean canRun() { - return ClusterWithLinuxTaskController.shouldRun(); - } - - @Override - protected String getJobOwnerName() { - String ugi = - System.getProperty(ClusterWithLinuxTaskController.TASKCONTROLLER_UGI); - String userName = ugi.split(",")[0]; - return userName; - } - - @Override - protected void checkFilePermissions(Path[] localCacheFiles) - throws IOException { - String userName = getJobOwnerName(); - String filePermissions = UserGroupInformation.getLoginUser() - .getShortUserName().equals(userName) ? "-rwxrwx---" : "-r-xrwx---"; - - for (Path p : localCacheFiles) { - // First make sure that the cache file has proper permissions. - TestTaskTrackerLocalization.checkFilePermissions(p.toUri().getPath(), - filePermissions, userName, - ClusterWithLinuxTaskController.taskTrackerSpecialGroup); - // Now. make sure that all the path components also have proper - // permissions. - checkPermissionOnPathComponents(p.toUri().getPath(), userName); - } - - } - - /** - * @param cachedFilePath - * @param userName - * @throws IOException - */ - private void checkPermissionOnPathComponents(String cachedFilePath, - String userName) - throws IOException { - // The trailing distcache/file/... string - String trailingStringForFirstFile = - cachedFilePath.replaceFirst(ROOT_MAPRED_LOCAL_DIR.getAbsolutePath() - + Path.SEPARATOR + "0_[0-" + (numLocalDirs - 1) + "]" - + Path.SEPARATOR + TaskTracker.getPrivateDistributedCacheDir(userName), - ""); - LOG.info("Trailing path for cacheFirstFile is : " - + trailingStringForFirstFile); - // The leading mapreduce.cluster.local.dir/0_[0-n]/taskTracker/$user string. - String leadingStringForFirstFile = - cachedFilePath.substring(0, cachedFilePath - .lastIndexOf(trailingStringForFirstFile)); - LOG.info("Leading path for cacheFirstFile is : " - + leadingStringForFirstFile); - - String dirPermissions = UserGroupInformation.getLoginUser() - .getShortUserName().equals(userName) ? "drwxrws---" : "dr-xrws---"; - - // Now check path permissions, starting with cache file's parent dir. 
- File path = new File(cachedFilePath).getParentFile(); - while (!path.getAbsolutePath().equals(leadingStringForFirstFile)) { - TestTaskTrackerLocalization.checkFilePermissions(path.getAbsolutePath(), - dirPermissions, userName, - ClusterWithLinuxTaskController.taskTrackerSpecialGroup); - path = path.getParentFile(); - } - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestUserLogCleanup.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestUserLogCleanup.java index 1707305c47..1592025d06 100644 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestUserLogCleanup.java +++ b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestUserLogCleanup.java @@ -61,7 +61,7 @@ public TestUserLogCleanup() throws IOException { } @After - public void tearDown() throws IOException { + public void tearDown() { FileUtil.fullyDelete(TaskLog.getUserLogDir()); } diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java index aa5f47ee2d..fc3c61720a 100644 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java +++ b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java @@ -172,7 +172,7 @@ public static String slurp(File f) throws IOException { return contents; } - static String slurpHadoop(Path p, FileSystem fs) throws IOException { + public static String slurpHadoop(Path p, FileSystem fs) throws IOException { int len = (int) fs.getFileStatus(p).getLen(); byte[] buf = new byte[len]; InputStream in = fs.open(p); diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/pipes/TestPipesAsDifferentUser.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/pipes/TestPipesAsDifferentUser.java deleted file mode 100644 index d514bf8ff0..0000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/pipes/TestPipesAsDifferentUser.java +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred.pipes; - -import java.security.PrivilegedExceptionAction; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapred.ClusterWithLinuxTaskController; -import org.apache.hadoop.mapred.JobConf; - -/** - * Test Pipes jobs with LinuxTaskController running the jobs as a user different - * from the user running the cluster. 
See {@link ClusterWithLinuxTaskController} - */ -public class TestPipesAsDifferentUser extends ClusterWithLinuxTaskController { - - private static final Log LOG = - LogFactory.getLog(TestPipesAsDifferentUser.class); - - public void testPipes() - throws Exception { - if (System.getProperty("compile.c++") == null) { - LOG.info("compile.c++ is not defined, so skipping TestPipes"); - return; - } - - if (!shouldRun()) { - return; - } - - super.startCluster(); - jobOwner.doAs(new PrivilegedExceptionAction() { - public Object run() throws Exception { - JobConf clusterConf = getClusterConf(); - Path inputPath = new Path(homeDirectory, "in"); - Path outputPath = new Path(homeDirectory, "out"); - - TestPipes.writeInputFile(FileSystem.get(clusterConf), inputPath); - TestPipes.runProgram(mrCluster, dfsCluster, TestPipes.wordCountSimple, - inputPath, outputPath, 3, 2, TestPipes.twoSplitOutput, clusterConf); - assertOwnerShip(outputPath); - TestPipes.cleanup(dfsCluster.getFileSystem(), outputPath); - - TestPipes.runProgram(mrCluster, dfsCluster, TestPipes.wordCountSimple, - inputPath, outputPath, 3, 0, TestPipes.noSortOutput, clusterConf); - assertOwnerShip(outputPath); - TestPipes.cleanup(dfsCluster.getFileSystem(), outputPath); - - TestPipes.runProgram(mrCluster, dfsCluster, TestPipes.wordCountPart, - inputPath, outputPath, 3, 2, TestPipes.fixedPartitionOutput, - clusterConf); - assertOwnerShip(outputPath); - TestPipes.cleanup(dfsCluster.getFileSystem(), outputPath); - - TestPipes.runNonPipedProgram(mrCluster, dfsCluster, - TestPipes.wordCountNoPipes, clusterConf); - assertOwnerShip(TestPipes.nonPipedOutDir, FileSystem - .getLocal(clusterConf)); - return null; - } - }); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java index 55937b2508..dd51ab3907 100644 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java +++ b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java @@ -19,9 +19,7 @@ package org.apache.hadoop.mapreduce.lib.input; import java.io.IOException; -import java.io.InputStreamReader; import java.io.OutputStreamWriter; -import java.io.Reader; import java.io.Writer; import junit.framework.TestCase; @@ -29,11 +27,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapred.UtilsForTests; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.Reducer; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; -import org.apache.tools.ant.util.FileUtils; import org.junit.Test; public class TestLineRecordReader extends TestCase { @@ -67,10 +65,7 @@ public void createInputFile(Configuration conf) throws IOException { public String readOutputFile(Configuration conf) throws IOException { FileSystem localFs = FileSystem.getLocal(conf); Path file = new Path(outputDir, "part-r-00000"); - Reader reader = new InputStreamReader(localFs.open(file)); - String r = FileUtils.readFully(reader); - reader.close(); - return r; + return UtilsForTests.slurpHadoop(file, localFs); } /** diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/util/TestMRAsyncDiskService.java 
b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/util/TestMRAsyncDiskService.java index 87ebd7363a..3e05962d86 100644 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/util/TestMRAsyncDiskService.java +++ b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/util/TestMRAsyncDiskService.java @@ -43,7 +43,7 @@ public class TestMRAsyncDiskService extends TestCase { "test.build.data", "/tmp")).toString(); @Override - protected void setUp() throws Exception { + protected void setUp() { FileUtil.fullyDelete(new File(TEST_ROOT_DIR)); } diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/util/TestProcfsBasedProcessTree.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/util/TestProcfsBasedProcessTree.java index fb02e074bf..54e1302587 100644 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/util/TestProcfsBasedProcessTree.java +++ b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/util/TestProcfsBasedProcessTree.java @@ -592,8 +592,7 @@ public void testProcessTreeDump() * @param procfsRootDir root directory to create. * @throws IOException if could not delete the procfs root directory */ - public static void setupProcfsRootDir(File procfsRootDir) - throws IOException { + public static void setupProcfsRootDir(File procfsRootDir) { // cleanup any existing process root dir. if (procfsRootDir.exists()) { assertTrue(FileUtil.fullyDelete(procfsRootDir)); diff --git a/hadoop-mapreduce-project/src/test/unit/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java b/hadoop-mapreduce-project/src/test/unit/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java index cc72949d39..b8469fa18c 100644 --- a/hadoop-mapreduce-project/src/test/unit/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java +++ b/hadoop-mapreduce-project/src/test/unit/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java @@ -23,6 +23,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.doThrow; import java.io.IOException; import java.util.Arrays; @@ -80,6 +81,29 @@ public void testFailedJob() throws Exception { jobControl.stop(); } + + @Test + public void testErrorWhileSubmitting() throws Exception { + JobControl jobControl = new JobControl("Test"); + + Job mockJob = mock(Job.class); + + ControlledJob job1 = new ControlledJob(mockJob, null); + when(mockJob.getConfiguration()).thenReturn(new Configuration()); + doThrow(new IncompatibleClassChangeError("This is a test")).when(mockJob).submit(); + + jobControl.addJob(job1); + + runJobControl(jobControl); + try { + assertEquals("Success list", 0, jobControl.getSuccessfulJobList().size()); + assertEquals("Failed list", 1, jobControl.getFailedJobList().size()); + + assertTrue(job1.getJobState() == ControlledJob.State.FAILED); + } finally { + jobControl.stop(); + } + } @Test public void testKillJob() throws Exception { diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index e9efa35028..a1114a1366 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -74,6 +74,8 @@ Apache Development Snapshot Repository https://repository.apache.org/content/repositories/snapshots + + 1.0.3 @@ -106,7 +108,7 @@ org.apache.hadoop - hadoop-alfredo + hadoop-auth ${project.version} @@ -166,6 +168,28 @@ jetty-util 6.1.26 + + + 
asm + asm + 3.2 + + + com.sun.jersey + jersey-core + 1.8 + + + com.sun.jersey + jersey-json + 1.8 + + + com.sun.jersey + jersey-server + 1.8 + + tomcat jasper-compiler @@ -320,7 +344,7 @@ org.codehaus.jackson jackson-mapper-asl - 1.6.9 + 1.7.1 org.aspectj @@ -340,12 +364,12 @@ org.apache.avro avro - 1.5.2 + 1.5.3 org.apache.avro avro-ipc - 1.5.2 + 1.5.3 net.sf.kosmosfs @@ -365,7 +389,7 @@ commons-daemon commons-daemon - 1.0.1 + ${commons-daemon.version} com.jcraft @@ -471,7 +495,7 @@ org.apache.avro avro-maven-plugin - 1.5.2 + 1.5.3 org.codehaus.mojo.jspc @@ -488,6 +512,11 @@ maven-project-info-reports-plugin 2.4 + + org.codehaus.mojo + exec-maven-plugin + 1.2 +