diff --git a/.gitignore b/.gitignore
index db58f6af6a..6ece6ca669 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,3 +15,5 @@ hadoop-hdfs-project/hadoop-hdfs/downloads
hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads
hadoop-common-project/hadoop-common/src/test/resources/contract-test-options.xml
hadoop-tools/hadoop-openstack/src/test/resources/contract-test-options.xml
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.toolbox
+yarnregistry.pdf
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 79622a2525..612781a3cd 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -220,6 +220,11 @@
       <type>test-jar</type>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-registry</artifactId>
+      <version>${project.version}</version>
+    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-yarn-server-nodemanager</artifactId>
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0eb05c65d4..872eb39867 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -142,6 +142,9 @@ Release 2.6.0 - UNRELEASED
YARN-1051. Add a system for creating reservations of cluster capacity.
(see breakdown below)
+ YARN-913. Add a way to register long-lived services in a YARN cluster.
+ (stevel)
+
IMPROVEMENTS
YARN-2197. Add a link to YARN CHANGES.txt in the left side of doc
@@ -590,6 +593,10 @@ Release 2.6.0 - UNRELEASED
YARN-2649. Fixed TestAMRMRPCNodeUpdates test failure. (Ming Ma via jianhe)
+ BREAKDOWN OF YARN-913 SUBTASKS AND RELATED JIRAS
+
+ YARN-2652 Add hadoop-yarn-registry package under hadoop-yarn. (stevel)
+
Release 2.5.1 - 2014-09-05
INCOMPATIBLE CHANGES
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 0e6207be67..6e82af044f 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -363,4 +363,14 @@
+
+
+
+
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 79244ad95d..1db7939ba2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1363,4 +1363,130 @@
     <name>yarn.nodemanager.container-monitor.procfs-tree.smaps-based-rss.enabled</name>
     <value>false</value>
   </property>
+
+  <!-- YARN registry -->
+
+  <property>
+    <description>
+      Is the registry enabled: does the RM start it up,
+      create the user and system paths, and purge
+      service records when containers, application attempts
+      and applications complete
+    </description>
+    <name>hadoop.registry.rm.enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>
+      The root zookeeper node for the registry
+    </description>
+    <name>hadoop.registry.zk.root</name>
+    <value>/registry</value>
+  </property>
+
+  <property>
+    <description>
+      Zookeeper session timeout in milliseconds
+    </description>
+    <name>hadoop.registry.zk.session.timeout.ms</name>
+    <value>60000</value>
+  </property>
+
+  <property>
+    <description>
+      Zookeeper connection timeout in milliseconds
+    </description>
+    <name>hadoop.registry.zk.connection.timeout.ms</name>
+    <value>15000</value>
+  </property>
+
+  <property>
+    <description>
+      Zookeeper connection retry count before failing
+    </description>
+    <name>hadoop.registry.zk.retry.times</name>
+    <value>5</value>
+  </property>
+
+  <property>
+    <description>
+      The interval between zookeeper connection retries in milliseconds
+    </description>
+    <name>hadoop.registry.zk.retry.interval.ms</name>
+    <value>1000</value>
+  </property>
+
+  <property>
+    <description>
+      Zookeeper retry limit in milliseconds, during
+      exponential backoff.
+
+      This places a limit even
+      if the retry times and interval limit, combined
+      with the backoff policy, result in a long retry
+      period
+    </description>
+    <name>hadoop.registry.zk.retry.ceiling.ms</name>
+    <value>60000</value>
+  </property>
+
+  <property>
+    <description>
+      List of hostname:port pairs defining the
+      zookeeper quorum binding for the registry
+    </description>
+    <name>hadoop.registry.zk.quorum</name>
+    <value>localhost:2181</value>
+  </property>
+
+  <property>
+    <description>
+      Key to set if the registry is secure. Turning it on
+      changes the permissions policy from "open access"
+      to restrictions on kerberos, with the option of
+      a user adding one or more auth key pairs down their
+      own tree.
+    </description>
+    <name>hadoop.registry.secure</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>
+      A comma separated list of Zookeeper ACL identifiers with
+      system access to the registry in a secure cluster.
+
+      These are given full access to all entries.
+
+      If there is an "@" at the end of a SASL entry it
+      instructs the registry client to append the default kerberos domain.
+    </description>
+    <name>hadoop.registry.system.acls</name>
+    <value>sasl:yarn@, sasl:mapred@, sasl:hdfs@</value>
+  </property>
+
+  <property>
+    <description>
+      The kerberos realm: used to set the realm of
+      system principals which do not declare their realm,
+      and any other accounts that need the value.
+
+      If empty, the default realm of the running process
+      is used.
+
+      If neither are known and the realm is needed, then the registry
+      service/client will fail.
+    </description>
+    <name>hadoop.registry.kerberos.realm</name>
+    <value></value>
+  </property>
+
+  <property>
+    <description>
+      Key to define the JAAS context. Used in secure
+      mode
+    </description>
+    <name>hadoop.registry.jaas.context</name>
+    <value>Client</value>
+  </property>
+
 </configuration>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
new file mode 100644
index 0000000000..05d8f23c3a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
@@ -0,0 +1,218 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                             http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <artifactId>hadoop-yarn</artifactId>
+    <groupId>org.apache.hadoop</groupId>
+    <version>3.0.0-SNAPSHOT</version>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>hadoop-yarn-registry</artifactId>
+  <version>3.0.0-SNAPSHOT</version>
+  <name>hadoop-yarn-registry</name>
+
+  <properties>
+    <!-- Needed for generating FindBugs warnings using parent pom -->
+    <yarn.basedir>${project.parent.basedir}</yarn.basedir>
+  </properties>
+
+  <dependencies>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-api</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-common</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minikdc</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-core-asl</artifactId>
+      <scope>compile</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-mapper-asl</artifactId>
+      <scope>compile</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>commons-httpclient</groupId>
+      <artifactId>commons-httpclient</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-framework</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-test</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+  </dependencies>
+
+  <build>
+    <resources>
+      <resource>
+        <directory>${basedir}/src/main/resources</directory>
+        <excludes>
+          <exclude>yarn-version-info.properties</exclude>
+        </excludes>
+        <filtering>false</filtering>
+      </resource>
+      <resource>
+        <directory>${basedir}/src/main/resources</directory>
+        <includes>
+          <include>yarn-version-info.properties</include>
+        </includes>
+        <filtering>true</filtering>
+      </resource>
+    </resources>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+      </plugin>
+
+      <plugin>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-maven-plugins</artifactId>
+        <executions>
+          <execution>
+            <id>version-info</id>
+            <phase>generate-resources</phase>
+            <goals>
+              <goal>version-info</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+
+      <plugin>
+        <artifactId>maven-jar-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+            <phase>test-compile</phase>
+          </execution>
+        </executions>
+      </plugin>
+
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <reuseForks>false</reuseForks>
+          <forkedProcessTimeoutInSeconds>900</forkedProcessTimeoutInSeconds>
+          <argLine>-Xmx1024m -XX:+HeapDumpOnOutOfMemoryError</argLine>
+          <environmentVariables>
+            <HADOOP_COMMON_HOME>${hadoop.common.build.dir}</HADOOP_COMMON_HOME>
+            <LD_LIBRARY_PATH>${env.LD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib</LD_LIBRARY_PATH>
+            <MALLOC_ARENA_MAX>4</MALLOC_ARENA_MAX>
+          </environmentVariables>
+          <systemPropertyVariables>
+            <test.build.dir>${test.build.dir}</test.build.dir>
+            <hadoop.tmp.dir>${hadoop.tmp.dir}</hadoop.tmp.dir>
+            <test.build.data>${test.build.data}</test.build.data>
+            <test.build.webapps>${test.build.webapps}</test.build.webapps>
+            <test.cache.data>${test.cache.data}</test.cache.data>
+            <hadoop.log.dir>${hadoop.log.dir}</hadoop.log.dir>
+            <test.build.classes>${test.build.classes}</test.build.classes>
+            <java.net.preferIPv4Stack>true</java.net.preferIPv4Stack>
+            <java.security.krb5.conf>${basedir}/src/test/resources/krb5.conf</java.security.krb5.conf>
+            <java.security.egd>${java.security.egd}</java.security.egd>
+            <require.test.libhadoop>${require.test.libhadoop}</require.test.libhadoop>
+          </systemPropertyVariables>
+          <includes>
+            <include>**/Test*.java</include>
+          </includes>
+          <excludes>
+            <exclude>**/${test.exclude}.java</exclude>
+            <exclude>${test.exclude.pattern}</exclude>
+            <exclude>**/Test*$*.java</exclude>
+          </excludes>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+</project>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/cli/RegistryCli.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/cli/RegistryCli.java
new file mode 100644
index 0000000000..863039e2e8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/cli/RegistryCli.java
@@ -0,0 +1,445 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.registry.cli;
+
+import static org.apache.hadoop.registry.client.binding.RegistryTypeUtils.*;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.List;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.registry.client.api.BindFlags;
+import org.apache.hadoop.registry.client.api.RegistryOperations;
+import org.apache.hadoop.registry.client.api.RegistryOperationsFactory;
+import org.apache.hadoop.registry.client.exceptions.AuthenticationFailedException;
+import org.apache.hadoop.registry.client.exceptions.InvalidPathnameException;
+import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
+import org.apache.hadoop.registry.client.exceptions.NoPathPermissionsException;
+import org.apache.hadoop.registry.client.exceptions.NoRecordException;
+import org.apache.hadoop.registry.client.types.Endpoint;
+import org.apache.hadoop.registry.client.types.ProtocolTypes;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class RegistryCli extends Configured implements Tool {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(RegistryCli.class);
+ protected final PrintStream sysout;
+ protected final PrintStream syserr;
+
+
+ private RegistryOperations registry;
+
+ static final String LS_USAGE = "ls pathName";
+ static final String RESOLVE_USAGE = "resolve pathName";
+ static final String BIND_USAGE =
+ "bind -inet -api apiName -p portNumber -h hostName pathName" + "\n"
+ + "bind -webui uriString -api apiName pathName" + "\n"
+ + "bind -rest uriString -api apiName pathName";
+ static final String MKNODE_USAGE = "mknode directoryName";
+ static final String RM_USAGE = "rm pathName";
+ static final String USAGE =
+ "\n" + LS_USAGE + "\n" + RESOLVE_USAGE + "\n" + BIND_USAGE + "\n" +
+ MKNODE_USAGE + "\n" + RM_USAGE;
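+
+  // Example argument vectors accepted by run(), matching the usage strings
+  // above (the api, port, host and path values are hypothetical):
+  //   {"ls", "/users"}
+  //   {"bind", "-inet", "-api", "org.example.api", "-p", "8080",
+  //    "-h", "host1", "/users/example"}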
+
+
+
+ public RegistryCli(PrintStream sysout, PrintStream syserr) {
+ super(new YarnConfiguration());
+ this.sysout = sysout;
+ this.syserr = syserr;
+ }
+
+
+ @SuppressWarnings("UseOfSystemOutOrSystemErr")
+ public static void main(String[] args) throws Exception {
+ RegistryCli cli = new RegistryCli(System.out, System.err);
+ int res = ToolRunner.run(cli, args);
+ System.exit(res);
+ }
+
+ private int usageError(String err, String usage) {
+ syserr.println("Error: " + err);
+ syserr.println("Usage: " + usage);
+ return -1;
+ }
+
+ private boolean validatePath(String path) {
+ if (!path.startsWith("/")) {
+ syserr.println("Path must start with /; given path was: " + path);
+ return false;
+ }
+ return true;
+ }
+ @Override
+ public int run(String[] args) throws Exception {
+ Preconditions.checkArgument(getConf() != null, "null configuration");
+ registry = RegistryOperationsFactory.createInstance(
+ new YarnConfiguration(getConf()));
+ registry.start();
+ if (args.length > 0) {
+ if (args[0].equals("ls")) {
+ return ls(args);
+ } else if (args[0].equals("resolve")) {
+ return resolve(args);
+ } else if (args[0].equals("bind")) {
+ return bind(args);
+ } else if (args[0].equals("mknode")) {
+ return mknode(args);
+ } else if (args[0].equals("rm")) {
+ return rm(args);
+ }
+ }
+ return usageError("Invalid command: " + args[0], USAGE);
+ }
+
+ @SuppressWarnings("unchecked")
+ public int ls(String [] args) {
+
+ Options lsOption = new Options();
+ CommandLineParser parser = new GnuParser();
+ try {
+ CommandLine line = parser.parse(lsOption, args);
+
+      List<String> argsList = line.getArgList();
+ if (argsList.size() != 2) {
+ return usageError("ls requires exactly one path argument", LS_USAGE);
+ }
+ if (!validatePath(argsList.get(1)))
+ return -1;
+
+ try {
+        List<String> children = registry.list(argsList.get(1));
+ for (String child : children) {
+ sysout.println(child);
+ }
+ return 0;
+
+ } catch (Exception e) {
+ syserr.println(analyzeException("ls", e, argsList));
+ }
+ return -1;
+ } catch (ParseException exp) {
+ return usageError("Invalid syntax " + exp, LS_USAGE);
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ public int resolve(String [] args) {
+ Options resolveOption = new Options();
+ CommandLineParser parser = new GnuParser();
+ try {
+ CommandLine line = parser.parse(resolveOption, args);
+
+      List<String> argsList = line.getArgList();
+ if (argsList.size() != 2) {
+ return usageError("resolve requires exactly one path argument", RESOLVE_USAGE);
+ }
+ if (!validatePath(argsList.get(1)))
+ return -1;
+
+ try {
+ ServiceRecord record = registry.resolve(argsList.get(1));
+
+ for (Endpoint endpoint : record.external) {
+ if ((endpoint.protocolType.equals(ProtocolTypes.PROTOCOL_WEBUI))
+ || (endpoint.protocolType.equals(ProtocolTypes.PROTOCOL_REST))) {
+ sysout.print(" Endpoint(ProtocolType="
+ + endpoint.protocolType + ", Api="
+ + endpoint.api + "); Uris are: ");
+ } else {
+ sysout.print(" Endpoint(ProtocolType="
+ + endpoint.protocolType + ", Api="
+ + endpoint.api + ");"
+ + " Addresses(AddressType="
+ + endpoint.addressType + ") are: ");
+
+ }
+        for (List<String> a : endpoint.addresses) {
+ sysout.print(a + " ");
+ }
+ sysout.println();
+ }
+ return 0;
+ } catch (Exception e) {
+ syserr.println(analyzeException("resolve", e, argsList));
+ }
+ return -1;
+ } catch (org.apache.commons.cli.ParseException exp) {
+ return usageError("Invalid syntax " + exp, RESOLVE_USAGE);
+ }
+
+ }
+
+ public int bind(String [] args) {
+ Option rest = OptionBuilder.withArgName("rest")
+ .hasArg()
+ .withDescription("rest Option")
+ .create("rest");
+ Option webui = OptionBuilder.withArgName("webui")
+ .hasArg()
+ .withDescription("webui Option")
+ .create("webui");
+ Option inet = OptionBuilder.withArgName("inet")
+ .withDescription("inet Option")
+ .create("inet");
+ Option port = OptionBuilder.withArgName("port")
+ .hasArg()
+ .withDescription("port to listen on [9999]")
+ .create("p");
+ Option host = OptionBuilder.withArgName("host")
+ .hasArg()
+ .withDescription("host name")
+ .create("h");
+ Option apiOpt = OptionBuilder.withArgName("api")
+ .hasArg()
+ .withDescription("api")
+ .create("api");
+ Options inetOption = new Options();
+ inetOption.addOption(inet);
+ inetOption.addOption(port);
+ inetOption.addOption(host);
+ inetOption.addOption(apiOpt);
+
+ Options webuiOpt = new Options();
+ webuiOpt.addOption(webui);
+ webuiOpt.addOption(apiOpt);
+
+ Options restOpt = new Options();
+ restOpt.addOption(rest);
+ restOpt.addOption(apiOpt);
+
+
+ CommandLineParser parser = new GnuParser();
+ ServiceRecord sr = new ServiceRecord();
+ CommandLine line = null;
+ if (args.length <= 1) {
+ return usageError("Invalid syntax ", BIND_USAGE);
+ }
+ if (args[1].equals("-inet")) {
+ int portNum;
+ String hostName;
+ String api;
+
+ try {
+ line = parser.parse(inetOption, args);
+ } catch (ParseException exp) {
+ return usageError("Invalid syntax " + exp.getMessage(), BIND_USAGE);
+ }
+ if (line.hasOption("inet") && line.hasOption("p") &&
+ line.hasOption("h") && line.hasOption("api")) {
+ portNum = Integer.parseInt(line.getOptionValue("p"));
+ hostName = line.getOptionValue("h");
+ api = line.getOptionValue("api");
+ sr.addExternalEndpoint(
+ inetAddrEndpoint(api, ProtocolTypes.PROTOCOL_HADOOP_IPC, hostName,
+ portNum));
+
+ } else {
+ return usageError("Missing options: must have host, port and api",
+ BIND_USAGE);
+ }
+
+ } else if (args[1].equals("-webui")) {
+ try {
+ line = parser.parse(webuiOpt, args);
+ } catch (ParseException exp) {
+ return usageError("Invalid syntax " + exp.getMessage(), BIND_USAGE);
+ }
+ if (line.hasOption("webui") && line.hasOption("api")) {
+ URI theUri = null;
+ try {
+ theUri = new URI(line.getOptionValue("webui"));
+ } catch (URISyntaxException e) {
+ return usageError("Invalid URI: " + e.getMessage(), BIND_USAGE);
+ }
+ sr.addExternalEndpoint(webEndpoint(line.getOptionValue("api"), theUri));
+
+ } else {
+ return usageError("Missing options: must have value for uri and api",
+ BIND_USAGE);
+ }
+ } else if (args[1].equals("-rest")) {
+ try {
+ line = parser.parse(restOpt, args);
+ } catch (ParseException exp) {
+ return usageError("Invalid syntax " + exp.getMessage(), BIND_USAGE);
+ }
+ if (line.hasOption("rest") && line.hasOption("api")) {
+ URI theUri = null;
+ try {
+ theUri = new URI(line.getOptionValue("rest"));
+ } catch (URISyntaxException e) {
+ return usageError("Invalid URI: " + e.getMessage(), BIND_USAGE);
+ }
+ sr.addExternalEndpoint(
+ restEndpoint(line.getOptionValue("api"), theUri));
+
+ } else {
+ return usageError("Missing options: must have value for uri and api",
+ BIND_USAGE);
+ }
+
+ } else {
+ return usageError("Invalid syntax", BIND_USAGE);
+ }
+ @SuppressWarnings("unchecked")
+    List<String> argsList = line.getArgList();
+ if (argsList.size() != 2) {
+ return usageError("bind requires exactly one path argument", BIND_USAGE);
+ }
+ if (!validatePath(argsList.get(1)))
+ return -1;
+
+ try {
+ registry.bind(argsList.get(1), sr, BindFlags.OVERWRITE);
+ return 0;
+ } catch (Exception e) {
+ syserr.println(analyzeException("bind", e, argsList));
+ }
+
+ return -1;
+ }
+
+ @SuppressWarnings("unchecked")
+ public int mknode(String [] args) {
+ Options mknodeOption = new Options();
+ CommandLineParser parser = new GnuParser();
+ try {
+ CommandLine line = parser.parse(mknodeOption, args);
+
+      List<String> argsList = line.getArgList();
+ if (argsList.size() != 2) {
+ return usageError("mknode requires exactly one path argument", MKNODE_USAGE);
+ }
+ if (!validatePath(argsList.get(1)))
+ return -1;
+
+ try {
+        registry.mknode(argsList.get(1), false);
+ return 0;
+ } catch (Exception e) {
+ syserr.println(analyzeException("mknode", e, argsList));
+ }
+ return -1;
+ } catch (ParseException exp) {
+ return usageError("Invalid syntax " + exp.toString(), MKNODE_USAGE);
+ }
+ }
+
+
+ @SuppressWarnings("unchecked")
+ public int rm(String[] args) {
+ Option recursive = OptionBuilder.withArgName("recursive")
+ .withDescription("delete recursively").create("r");
+
+ Options rmOption = new Options();
+ rmOption.addOption(recursive);
+
+ boolean recursiveOpt = false;
+
+ CommandLineParser parser = new GnuParser();
+ try {
+ CommandLine line = parser.parse(rmOption, args);
+
+      List<String> argsList = line.getArgList();
+      if (argsList.size() != 2) {
+        return usageError("rm requires exactly one path argument", RM_USAGE);
+ }
+ if (!validatePath(argsList.get(1)))
+ return -1;
+
+ try {
+ if (line.hasOption("r")) {
+ recursiveOpt = true;
+ }
+
+ registry.delete(argsList.get(1), recursiveOpt);
+ return 0;
+ } catch (Exception e) {
+ syserr.println(analyzeException("rm", e, argsList));
+ }
+ return -1;
+ } catch (ParseException exp) {
+ return usageError("Invalid syntax " + exp.toString(), RM_USAGE);
+ }
+ }
+
+ /**
+ * Given an exception and a possibly empty argument list, generate
+ * a diagnostics string for use in error messages
+ * @param operation the operation that failed
+ * @param e exception
+ * @param argsList arguments list
+ * @return a string intended for the user
+ */
+ String analyzeException(String operation,
+ Exception e,
+      List<String> argsList) {
+
+    String pathArg = argsList.size() > 1 ? argsList.get(1) : "(none)";
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Operation {} on path {} failed with exception {}",
+ operation, pathArg, e, e);
+ }
+ if (e instanceof InvalidPathnameException) {
+ return "InvalidPath :" + pathArg + ": " + e;
+ }
+ if (e instanceof PathNotFoundException) {
+ return "Path not found: " + pathArg;
+ }
+ if (e instanceof NoRecordException) {
+ return "No service record at path " + pathArg;
+ }
+ if (e instanceof AuthenticationFailedException) {
+ return "Failed to authenticate to registry : " + e;
+ }
+ if (e instanceof NoPathPermissionsException) {
+ return "No Permission to path: " + pathArg + ": " + e;
+ }
+ if (e instanceof AccessControlException) {
+ return "No Permission to path: " + pathArg + ": " + e;
+ }
+ if (e instanceof InvalidRecordException) {
+ return "Unable to read record at: " + pathArg + ": " + e;
+ }
+ if (e instanceof IOException) {
+ return "IO Exception when accessing path :" + pathArg + ": " + e;
+ }
+ // something else went very wrong here
+ return "Exception " + e;
+
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/BindFlags.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/BindFlags.java
new file mode 100644
index 0000000000..5fd2aef5b5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/BindFlags.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.api;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Combinable Flags to use when creating a service entry.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface BindFlags {
+
+ /**
+   * Create the entry. This is just "0" and can be OR'ed with anything.
+ */
+ int CREATE = 0;
+
+ /**
+ * The entry should be created even if an existing entry is there.
+ */
+ int OVERWRITE = 1;
+
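+  // A sketch of how the flags combine: they are OR'ed together when
+  // binding a record (the path and record values here are hypothetical):
+  //   operations.bind("/users/example/service", record,
+  //       BindFlags.CREATE | BindFlags.OVERWRITE);
+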
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
new file mode 100644
index 0000000000..a6fe216ec9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
@@ -0,0 +1,286 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.api;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Constants for the registry, including configuration keys and default
+ * values.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface RegistryConstants {
+
+ /**
+   * Prefix for registry configuration options: {@value}.
+   * Why hadoop. and not yarn.? Because the registry
+   * can live outside YARN.
+ */
+ String REGISTRY_PREFIX = "hadoop.registry.";
+
+ /**
+ * Prefix for zookeeper-specific options: {@value}
+ *
+ * For clients using other protocols, these options are not supported.
+ */
+ String ZK_PREFIX = REGISTRY_PREFIX + "zk.";
+
+ /**
+ * flag to indicate whether or not the registry should
+ * be enabled in the RM: {@value}
+ */
+ String KEY_REGISTRY_ENABLED = REGISTRY_PREFIX + "rm.enabled";
+
+ /**
+   * Default value for enabling the registry in the RM: {@value}
+ */
+ boolean DEFAULT_REGISTRY_ENABLED = false;
+
+ /**
+ * Key to set if the registry is secure: {@value}.
+ * Turning it on changes the permissions policy from "open access"
+ * to restrictions on kerberos with the option of
+ * a user adding one or more auth key pairs down their
+ * own tree.
+ */
+ String KEY_REGISTRY_SECURE = REGISTRY_PREFIX + "secure";
+
+ /**
+ * Default registry security policy: {@value}.
+ */
+ boolean DEFAULT_REGISTRY_SECURE = false;
+
+ /**
+ * Root path in the ZK tree for the registry: {@value}
+ */
+ String KEY_REGISTRY_ZK_ROOT = ZK_PREFIX + "root";
+
+ /**
+ * Default root of the yarn registry: {@value}
+ */
+ String DEFAULT_ZK_REGISTRY_ROOT = "/registry";
+
+ /**
+ * Registry client authentication policy.
+ *
+ * This is only used in secure clusters.
+ *
+ * If the Factory methods of {@link RegistryOperationsFactory}
+ * are used, this key does not need to be set: it is set
+ * up based on the factory method used.
+ */
+ String KEY_REGISTRY_CLIENT_AUTH =
+ REGISTRY_PREFIX + "client.auth";
+
+ /**
+ * Registry client uses Kerberos: authentication is automatic from
+ * logged in user
+ */
+ String REGISTRY_CLIENT_AUTH_KERBEROS = "kerberos";
+
+ /**
+ * Username/password is the authentication mechanism.
+ * If set then both {@link #KEY_REGISTRY_CLIENT_AUTHENTICATION_ID}
+ * and {@link #KEY_REGISTRY_CLIENT_AUTHENTICATION_PASSWORD} must be set.
+ */
+ String REGISTRY_CLIENT_AUTH_DIGEST = "digest";
+
+ /**
+ * No authentication; client is anonymous
+ */
+ String REGISTRY_CLIENT_AUTH_ANONYMOUS = "";
+
+ /**
+ * Registry client authentication ID
+ *
+ * This is only used in secure clusters with
+ * {@link #KEY_REGISTRY_CLIENT_AUTH} set to
+ * {@link #REGISTRY_CLIENT_AUTH_DIGEST}
+ *
+ */
+ String KEY_REGISTRY_CLIENT_AUTHENTICATION_ID =
+ KEY_REGISTRY_CLIENT_AUTH + ".id";
+
+ /**
+ * Registry client authentication password.
+ *
+ * This is only used in secure clusters with the client set to
+   * use digest (not SASL or anonymous) authentication.
+ *
+ * Specifically, {@link #KEY_REGISTRY_CLIENT_AUTH} set to
+ * {@link #REGISTRY_CLIENT_AUTH_DIGEST}
+ *
+ */
+ String KEY_REGISTRY_CLIENT_AUTHENTICATION_PASSWORD =
+ KEY_REGISTRY_CLIENT_AUTH + ".password";
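+
+  // A configuration sketch for digest authentication; both keys must be
+  // set together (the id and password values here are hypothetical):
+  //   conf.set(KEY_REGISTRY_CLIENT_AUTH, REGISTRY_CLIENT_AUTH_DIGEST);
+  //   conf.set(KEY_REGISTRY_CLIENT_AUTHENTICATION_ID, "admin");
+  //   conf.set(KEY_REGISTRY_CLIENT_AUTHENTICATION_PASSWORD, "secret");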
+
+ /**
+ * List of hostname:port pairs defining the
+ * zookeeper quorum binding for the registry {@value}
+ */
+ String KEY_REGISTRY_ZK_QUORUM = ZK_PREFIX + "quorum";
+
+ /**
+ * The default zookeeper quorum binding for the registry: {@value}
+ */
+ String DEFAULT_REGISTRY_ZK_QUORUM = "localhost:2181";
+
+ /**
+ * Zookeeper session timeout in milliseconds: {@value}
+ */
+ String KEY_REGISTRY_ZK_SESSION_TIMEOUT =
+ ZK_PREFIX + "session.timeout.ms";
+
+ /**
+ * The default ZK session timeout: {@value}.
+ */
+ int DEFAULT_ZK_SESSION_TIMEOUT = 60000;
+
+ /**
+ * Zookeeper connection timeout in milliseconds: {@value}.
+ */
+ String KEY_REGISTRY_ZK_CONNECTION_TIMEOUT =
+ ZK_PREFIX + "connection.timeout.ms";
+
+ /**
+ * The default ZK connection timeout: {@value}.
+ */
+ int DEFAULT_ZK_CONNECTION_TIMEOUT = 15000;
+
+ /**
+ * Zookeeper connection retry count before failing: {@value}.
+ */
+ String KEY_REGISTRY_ZK_RETRY_TIMES = ZK_PREFIX + "retry.times";
+
+ /**
+ * The default # of times to retry a ZK connection: {@value}.
+ */
+ int DEFAULT_ZK_RETRY_TIMES = 5;
+
+ /**
+ * Zookeeper connect interval in milliseconds: {@value}.
+ */
+ String KEY_REGISTRY_ZK_RETRY_INTERVAL =
+ ZK_PREFIX + "retry.interval.ms";
+
+ /**
+ * The default interval between connection retries: {@value}.
+ */
+ int DEFAULT_ZK_RETRY_INTERVAL = 1000;
+
+ /**
+ * Zookeeper retry limit in milliseconds, during
+ * exponential backoff: {@value}.
+ *
+ * This places a limit even
+ * if the retry times and interval limit, combined
+ * with the backoff policy, result in a long retry
+ * period
+ *
+ */
+ String KEY_REGISTRY_ZK_RETRY_CEILING =
+ ZK_PREFIX + "retry.ceiling.ms";
+
+ /**
+ * Default limit on retries: {@value}.
+ */
+ int DEFAULT_ZK_RETRY_CEILING = 60000;
+
+ /**
+ * A comma separated list of Zookeeper ACL identifiers with
+ * system access to the registry in a secure cluster: {@value}.
+ *
+ * These are given full access to all entries.
+ *
+ * If there is an "@" at the end of an entry it
+ * instructs the registry client to append the kerberos realm as
+ * derived from the login and {@link #KEY_REGISTRY_KERBEROS_REALM}.
+ */
+ String KEY_REGISTRY_SYSTEM_ACCOUNTS = REGISTRY_PREFIX + "system.accounts";
+
+ /**
+ * Default system accounts given global access to the registry: {@value}.
+ */
+ String DEFAULT_REGISTRY_SYSTEM_ACCOUNTS =
+ "sasl:yarn@, sasl:mapred@, sasl:hdfs@, sasl:hadoop@";
+
+ /**
+   * A comma separated list of Zookeeper ACL identifiers with
+   * user access to the registry in a secure cluster: {@value}.
+   *
+   * These are given full access to all of that user's entries.
+ *
+ * If there is an "@" at the end of an entry it
+ * instructs the registry client to append the default kerberos domain.
+ */
+ String KEY_REGISTRY_USER_ACCOUNTS = REGISTRY_PREFIX + "user.accounts";
+
+ /**
+   * Default user accounts: {@value}.
+ */
+ String DEFAULT_REGISTRY_USER_ACCOUNTS = "";
+
+ /**
+ * The kerberos realm: {@value}.
+ *
+ * This is used to set the realm of
+ * system principals which do not declare their realm,
+ * and any other accounts that need the value.
+ *
+ * If empty, the default realm of the running process
+ * is used.
+ *
+ * If neither are known and the realm is needed, then the registry
+ * service/client will fail.
+ */
+ String KEY_REGISTRY_KERBEROS_REALM = REGISTRY_PREFIX + "kerberos.realm";
+
+ /**
+ * Key to define the JAAS context. Used in secure registries: {@value}.
+ */
+ String KEY_REGISTRY_CLIENT_JAAS_CONTEXT = REGISTRY_PREFIX + "jaas.context";
+
+ /**
+   * Default client-side registry JAAS context: {@value}
+ */
+ String DEFAULT_REGISTRY_CLIENT_JAAS_CONTEXT = "Client";
+
+ /**
+ * path to users off the root: {@value}.
+ */
+ String PATH_USERS = "/users/";
+
+ /**
+   * path to system services off the root: {@value}.
+ */
+ String PATH_SYSTEM_SERVICES = "/services/";
+
+ /**
+   * path to a user's services under their home path: {@value}.
+ */
+ String PATH_USER_SERVICES = "/services/";
+
+ /**
+ * path under a service record to point to components of that service:
+ * {@value}.
+ */
+ String SUBPATH_COMPONENTS = "/components/";
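+
+  // Putting the path constants together, a user's service instance would
+  // sit at a path of the form (the user and service names are hypothetical):
+  //   /users/alice/services/org-apache-example/instance-1
+  // with its component records under
+  //   /users/alice/services/org-apache-example/instance-1/components/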
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperations.java
new file mode 100644
index 0000000000..c51bcf7465
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperations.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.api;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.service.Service;
+import org.apache.hadoop.registry.client.exceptions.InvalidPathnameException;
+import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
+import org.apache.hadoop.registry.client.exceptions.NoRecordException;
+import org.apache.hadoop.registry.client.types.RegistryPathStatus;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Registry Operations
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface RegistryOperations extends Service {
+
+ /**
+ * Create a path.
+ *
+ * It is not an error if the path exists already, be it empty or not.
+ *
+ * The createParents flag also requests creating the parents.
+ * As entries in the registry can hold data while still having
+ * child entries, it is not an error if any of the parent path
+ * elements have service records.
+ *
+ * @param path path to create
+ * @param createParents also create the parents.
+ * @throws PathNotFoundException parent path is not in the registry.
+ * @throws InvalidPathnameException path name is invalid.
+ * @throws IOException Any other IO Exception.
+ * @return true if the path was created, false if it existed.
+ */
+ boolean mknode(String path, boolean createParents)
+ throws PathNotFoundException,
+ InvalidPathnameException,
+ IOException;
+
+ /**
+ * Bind a path in the registry to a service record
+ * @param path path to service record
+   * @param record service record to create/update
+ * @param flags bind flags
+ * @throws PathNotFoundException the parent path does not exist
+ * @throws FileAlreadyExistsException path exists but create flags
+ * do not include "overwrite"
+ * @throws InvalidPathnameException path name is invalid.
+ * @throws IOException Any other IO Exception.
+ */
+ void bind(String path, ServiceRecord record, int flags)
+ throws PathNotFoundException,
+ FileAlreadyExistsException,
+ InvalidPathnameException,
+ IOException;
+
+ /**
+ * Resolve the record at a path
+ * @param path path to an entry containing a {@link ServiceRecord}
+ * @return the record
+ * @throws PathNotFoundException path is not in the registry.
+ * @throws NoRecordException if there is not a service record
+ * @throws InvalidRecordException if there was a service record but it could
+ * not be parsed.
+ * @throws IOException Any other IO Exception
+ */
+
+ ServiceRecord resolve(String path)
+ throws PathNotFoundException,
+ NoRecordException,
+ InvalidRecordException,
+ IOException;
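+
+  // A minimal usage sketch of this interface (assumes an initialized and
+  // started instance; the paths and the record are hypothetical):
+  //   operations.mknode("/users/example/services/app", true);
+  //   operations.bind("/users/example/services/app/instance", record,
+  //       BindFlags.OVERWRITE);
+  //   ServiceRecord resolved =
+  //       operations.resolve("/users/example/services/app/instance");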
+
+ /**
+ * Get the status of a path
+ * @param path path to query
+ * @return the status of the path
+ * @throws PathNotFoundException path is not in the registry.
+ * @throws InvalidPathnameException the path is invalid.
+ * @throws IOException Any other IO Exception
+ */
+ RegistryPathStatus stat(String path)
+ throws PathNotFoundException,
+ InvalidPathnameException,
+ IOException;
+
+ /**
+ * Probe for a path existing.
+   * This is equivalent to {@link #stat(String)} with
+   * any failure downgraded to a "false" return value.
+ * @param path path to query
+ * @return true if the path was found
+ * @throws IOException
+ */
+ boolean exists(String path) throws IOException;
+
+ /**
+ * List all entries under a registry path, returning the relative names
+ * of the entries.
+ * @param path path to query
+ * @return a possibly empty list of the short path names of
+ * child entries.
+ * @throws PathNotFoundException
+ * @throws InvalidPathnameException
+ * @throws IOException
+ */
+  List<String> list(String path) throws
+ PathNotFoundException,
+ InvalidPathnameException,
+ IOException;
+
+ /**
+ * Delete a path.
+ *
+ * If the operation returns without an error then the entry has been
+ * deleted.
+   * @param path path to delete
+ * @param recursive recursive flag
+ * @throws PathNotFoundException path is not in the registry.
+ * @throws InvalidPathnameException the path is invalid.
+ * @throws PathIsNotEmptyDirectoryException path has child entries, but
+ * recursive is false.
+ * @throws IOException Any other IO Exception
+ *
+ */
+ void delete(String path, boolean recursive)
+ throws PathNotFoundException,
+ PathIsNotEmptyDirectoryException,
+ InvalidPathnameException,
+ IOException;
+
+ /**
+ * Add a new write access entry to be added to node permissions in all
+ * future write operations of a session connected to a secure registry.
+ *
+ * This does not grant the session any more rights: if it lacked any write
+ * access, it will still be unable to manipulate the registry.
+ *
+ * In an insecure cluster, this operation has no effect.
+ * @param id ID to use
+ * @param pass password
+ * @return true if the accessor was added: that is, the registry connection
+ * uses permissions to manage access
+ * @throws IOException on any failure to build the digest
+ */
+ boolean addWriteAccessor(String id, String pass) throws IOException;
+
+ /**
+ * Clear all write accessors.
+ *
+   * At this point all standard permissions/ACLs are retained,
+   * including any set on behalf of the user.
+   * Only accessors added via {@link #addWriteAccessor(String, String)}
+ * are removed.
+ */
+  void clearWriteAccessors();
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java
new file mode 100644
index 0000000000..443654df37
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.api;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.ServiceStateException;
+import org.apache.hadoop.registry.client.impl.RegistryOperationsClient;
+
+import static org.apache.hadoop.registry.client.api.RegistryConstants.*;
+
+/**
+ * A factory for registry operation service instances.
+ *
+ * Each created instance will be returned initialized.
+ *
+ * That is, the service will have had {@code Service.init(conf)} applied
+ * to it, possibly after the configuration has been modified to
+ * support the specific binding/security mechanism used.
+ */
+public final class RegistryOperationsFactory {
+
+ private RegistryOperationsFactory() {
+ }
+
+ /**
+ * Create and initialize a registry operations instance.
+   * Access rights will be determined from the configuration.
+ * @param conf configuration
+ * @return a registry operations instance
+ * @throws ServiceStateException on any failure to initialize
+ */
+ public static RegistryOperations createInstance(Configuration conf) {
+ return createInstance("RegistryOperations", conf);
+ }
+
+ /**
+ * Create and initialize a registry operations instance.
+ * Access rights will be determined from the configuration
+ * @param name name of the instance
+ * @param conf configuration
+ * @return a registry operations instance
+ * @throws ServiceStateException on any failure to initialize
+ */
+ public static RegistryOperations createInstance(String name, Configuration conf) {
+ Preconditions.checkArgument(conf != null, "Null configuration");
+ RegistryOperationsClient operations =
+ new RegistryOperationsClient(name);
+ operations.init(conf);
+ return operations;
+ }
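+
+  // Typical client usage sketch: create and init via the factory, then
+  // start the service before issuing operations (mirrors RegistryCli.run()):
+  //   RegistryOperations operations =
+  //       RegistryOperationsFactory.createInstance(new YarnConfiguration());
+  //   operations.start();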
+
+ /**
+ * Create and initialize an anonymous read/write registry operations instance.
+ * In a secure cluster, this instance will only have read access to the
+ * registry.
+ * @param conf configuration
+ * @return an anonymous registry operations instance
+ *
+ * @throws ServiceStateException on any failure to initialize
+ */
+ public static RegistryOperations createAnonymousInstance(Configuration conf) {
+ Preconditions.checkArgument(conf != null, "Null configuration");
+ conf.set(KEY_REGISTRY_CLIENT_AUTH, REGISTRY_CLIENT_AUTH_ANONYMOUS);
+ return createInstance("AnonymousRegistryOperations", conf);
+ }
+
+ /**
+   * Create and initialize a secure, Kerberos-authenticated instance.
+ *
+ * The user identity will be inferred from the current user
+ *
+ * The authentication of this instance will expire when any kerberos
+ * tokens needed to authenticate with the registry infrastructure expire.
+ * @param conf configuration
+ * @param jaasContext the JAAS context of the account.
+ * @return a registry operations instance
+ * @throws ServiceStateException on any failure to initialize
+ */
+ public static RegistryOperations createKerberosInstance(Configuration conf,
+ String jaasContext) {
+ Preconditions.checkArgument(conf != null, "Null configuration");
+ conf.set(KEY_REGISTRY_CLIENT_AUTH, REGISTRY_CLIENT_AUTH_KERBEROS);
+ conf.set(KEY_REGISTRY_CLIENT_JAAS_CONTEXT, jaasContext);
+ return createInstance("KerberosRegistryOperations", conf);
+ }
+
+ /**
+ * Create and initialize an operations instance authenticated with write
+ * access via an id:password pair.
+ *
+   * The instance will have read access
+   * across the registry, but write access only to those parts of the
+   * registry to which it has been given the relevant permissions.
+ * @param conf configuration
+ * @param id user ID
+ * @param password password
+ * @return a registry operations instance
+ * @throws ServiceStateException on any failure to initialize
+ * @throws IllegalArgumentException if an argument is invalid
+ */
+ public static RegistryOperations createAuthenticatedInstance(Configuration conf,
+ String id,
+ String password) {
+ Preconditions.checkArgument(!StringUtils.isEmpty(id), "empty Id");
+ Preconditions.checkArgument(!StringUtils.isEmpty(password), "empty Password");
+ Preconditions.checkArgument(conf != null, "Null configuration");
+ conf.set(KEY_REGISTRY_CLIENT_AUTH, REGISTRY_CLIENT_AUTH_DIGEST);
+ conf.set(KEY_REGISTRY_CLIENT_AUTHENTICATION_ID, id);
+ conf.set(KEY_REGISTRY_CLIENT_AUTHENTICATION_PASSWORD, password);
+ return createInstance("DigestRegistryOperations", conf);
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/package-info.java
new file mode 100644
index 0000000000..f5f844eff9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/package-info.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * YARN Registry Client API.
+ *
+ * This package contains the core API for the YARN registry.
+ *
+ *
+ * <ul>
+ *   <li>Data types can be found in
+ *   {@link org.apache.hadoop.registry.client.types}</li>
+ *   <li>Exceptions are listed in
+ *   {@link org.apache.hadoop.registry.client.exceptions}</li>
+ *   <li>Classes to assist use of the registry are in
+ *   {@link org.apache.hadoop.registry.client.binding}</li>
+ * </ul>
+ *
+ *
+ */
+package org.apache.hadoop.registry.client.api;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/JsonSerDeser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/JsonSerDeser.java
new file mode 100644
index 0000000000..e086e3694f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/JsonSerDeser.java
@@ -0,0 +1,327 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.binding;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
+import org.apache.hadoop.registry.client.exceptions.NoRecordException;
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.JsonParseException;
+import org.codehaus.jackson.JsonProcessingException;
+import org.codehaus.jackson.map.DeserializationConfig;
+import org.codehaus.jackson.map.JsonMappingException;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.SerializationConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.DataOutputStream;
+import java.io.EOFException;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+/**
+ * Support for marshalling objects to and from JSON.
+ *
+ * It constructs an object mapper as an instance field
+ * and synchronizes access to those methods
+ * which use the mapper.
+ * @param <T> type to marshal
+ */
+@InterfaceAudience.Private()
+@InterfaceStability.Evolving
+public class JsonSerDeser<T> {
+
+ private static final Logger LOG = LoggerFactory.getLogger(JsonSerDeser.class);
+ private static final String UTF_8 = "UTF-8";
+ public static final String E_NO_SERVICE_RECORD = "No service record at path";
+
+  private final Class<T> classType;
+ private final ObjectMapper mapper;
+ private final byte[] header;
+
+ /**
+ * Create an instance bound to a specific type
+   * @param classType class to marshal
+   * @param header byte array to use as header
+   */
+  public JsonSerDeser(Class<T> classType, byte[] header) {
+ Preconditions.checkArgument(classType != null, "null classType");
+ Preconditions.checkArgument(header != null, "null header");
+ this.classType = classType;
+ this.mapper = new ObjectMapper();
+ mapper.configure(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES,
+ false);
+ // make an immutable copy to keep findbugs happy.
+ byte[] h = new byte[header.length];
+ System.arraycopy(header, 0, h, 0, header.length);
+ this.header = h;
+ }
+
+ public String getName() {
+ return classType.getSimpleName();
+ }
+
+ /**
+ * Convert from JSON
+ *
+ * @param json input
+ * @return the parsed JSON
+ * @throws IOException IO
+ * @throws JsonMappingException failure to map from the JSON to this class
+ */
+ @SuppressWarnings("unchecked")
+ public synchronized T fromJson(String json)
+ throws IOException, JsonParseException, JsonMappingException {
+ try {
+ return mapper.readValue(json, classType);
+ } catch (IOException e) {
+ LOG.error("Exception while parsing json : " + e + "\n" + json, e);
+ throw e;
+ }
+ }
+
+ /**
+ * Convert from a JSON file
+ * @param jsonFile input file
+ * @return the parsed JSON
+ * @throws IOException IO problems
+ * @throws JsonMappingException failure to map from the JSON to this class
+ */
+ @SuppressWarnings("unchecked")
+ public synchronized T fromFile(File jsonFile)
+ throws IOException, JsonParseException, JsonMappingException {
+ try {
+ return mapper.readValue(jsonFile, classType);
+ } catch (IOException e) {
+ LOG.error("Exception while parsing json file {}: {}", jsonFile, e);
+ throw e;
+ }
+ }
+
+ /**
+ * Convert from a JSON file
+ * @param resource input file
+ * @return the parsed JSON
+ * @throws IOException IO problems
+ * @throws JsonMappingException failure to map from the JSON to this class
+ */
+ @SuppressWarnings({"IOResourceOpenedButNotSafelyClosed"})
+ public synchronized T fromResource(String resource)
+ throws IOException, JsonParseException, JsonMappingException {
+ InputStream resStream = null;
+ try {
+ resStream = this.getClass().getResourceAsStream(resource);
+ if (resStream == null) {
+ throw new FileNotFoundException(resource);
+ }
+ return mapper.readValue(resStream, classType);
+ } catch (IOException e) {
+ LOG.error("Exception while parsing json resource {}: {}", resource, e);
+ throw e;
+ } finally {
+ IOUtils.closeStream(resStream);
+ }
+ }
+
+ /**
+   * Clone by converting to JSON and back again.
+ * This is much less efficient than any Java clone process.
+ * @param instance instance to duplicate
+ * @return a new instance
+ * @throws IOException problems.
+ */
+ public T fromInstance(T instance) throws IOException {
+ return fromJson(toJson(instance));
+ }
+
+ /**
+ * Load from a Hadoop filesystem
+ * @param fs filesystem
+ * @param path path
+   * @return the loaded instance
+ * @throws IOException IO problems
+ * @throws EOFException if not enough bytes were read in
+ * @throws JsonParseException parse problems
+ * @throws JsonMappingException O/J mapping problems
+ */
+ public T load(FileSystem fs, Path path)
+ throws IOException, JsonParseException, JsonMappingException {
+ FileStatus status = fs.getFileStatus(path);
+ long len = status.getLen();
+ byte[] b = new byte[(int) len];
+    FSDataInputStream dataInputStream = fs.open(path);
+    try {
+      int count = dataInputStream.read(b);
+      if (count != len) {
+        throw new EOFException(path.toString() + ": read finished prematurely");
+      }
+    } finally {
+      IOUtils.closeStream(dataInputStream);
+    }
+    return fromBytes(path.toString(), b, 0);
+ }
+
+ /**
+   * Save an instance to a Hadoop filesystem
+   * @param fs filesystem
+   * @param path path
+   * @param instance instance to save
+   * @param overwrite should any existing file be overwritten
+ * @throws IOException IO exception
+ */
+ public void save(FileSystem fs, Path path, T instance,
+ boolean overwrite) throws
+ IOException {
+ FSDataOutputStream dataOutputStream = fs.create(path, overwrite);
+ writeJsonAsBytes(instance, dataOutputStream);
+ }
+
+ /**
+   * Write the JSON as bytes, then close the stream
+   * @param instance instance to write
+   * @param dataOutputStream an output stream that will always be closed
+ * @throws IOException on any failure
+ */
+ private void writeJsonAsBytes(T instance,
+ DataOutputStream dataOutputStream) throws
+ IOException {
+ try {
+ byte[] b = toBytes(instance);
+ dataOutputStream.write(b);
+ } finally {
+ dataOutputStream.close();
+ }
+ }
+
+ /**
+   * Convert an instance to UTF-8 encoded JSON bytes
+ * @param instance instance to convert
+ * @return a byte array
+ * @throws IOException
+ */
+ public byte[] toBytes(T instance) throws IOException {
+ String json = toJson(instance);
+ return json.getBytes(UTF_8);
+ }
+
+ /**
+   * Convert an instance to JSON bytes, inserting the header first
+ * @param instance instance to convert
+ * @return a byte array
+ * @throws IOException
+ */
+ public byte[] toByteswithHeader(T instance) throws IOException {
+ byte[] body = toBytes(instance);
+
+ ByteBuffer buffer = ByteBuffer.allocate(body.length + header.length);
+ buffer.put(header);
+ buffer.put(body);
+ return buffer.array();
+ }
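+
+  // Round-trip sketch: serialize a record with the header prepended, then
+  // parse it back (the serDeser, record and path values are hypothetical):
+  //   byte[] bytes = serDeser.toByteswithHeader(record);
+  //   T parsed = serDeser.fromBytesWithHeader(path, bytes);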
+
+ /**
+ * Deserialize from a byte array
+ * @param path path the data came from
+   * @param bytes byte array
+   * @param offset offset in the array to start reading from
+   * @return the parsed instance
+ * @throws IOException all problems
+ * @throws EOFException not enough data
+ * @throws InvalidRecordException if the parsing failed -the record is invalid
+ */
+ public T fromBytes(String path, byte[] bytes, int offset) throws IOException,
+ InvalidRecordException {
+ int data = bytes.length - offset;
+ if (data <= 0) {
+ throw new EOFException("No data at " + path);
+ }
+ String json = new String(bytes, offset, data, UTF_8);
+ try {
+ return fromJson(json);
+ } catch (JsonProcessingException e) {
+ throw new InvalidRecordException(path, e.toString(), e);
+ }
+ }
+
+ /**
+ * Read from a byte array to a type, checking the header first
+ * @param path source of data
+ * @param buffer buffer
+   * @return the parsed structure
+ * @throws IOException on a failure
+ * @throws NoRecordException if header checks implied there was no record
+ * @throws InvalidRecordException if record parsing failed
+ */
+ @SuppressWarnings("unchecked")
+ public T fromBytesWithHeader(String path, byte[] buffer) throws IOException {
+ int hlen = header.length;
+ int blen = buffer.length;
+ if (hlen > 0) {
+ if (blen < hlen) {
+ throw new NoRecordException(path, E_NO_SERVICE_RECORD);
+ }
+ byte[] magic = Arrays.copyOfRange(buffer, 0, hlen);
+ if (!Arrays.equals(header, magic)) {
+ LOG.debug("start of entry does not match service record header at {}",
+ path);
+ throw new NoRecordException(path, E_NO_SERVICE_RECORD);
+ }
+ }
+ return fromBytes(path, buffer, hlen);
+ }
+
+ /**
+ * Check if a buffer has a header which matches this record type
+ * @param buffer buffer
+ * @return true if there is a match
+ * @throws IOException
+ */
+ public boolean headerMatches(byte[] buffer) throws IOException {
+ int hlen = header.length;
+ int blen = buffer.length;
+ boolean matches = false;
+ if (blen > hlen) {
+ byte[] magic = Arrays.copyOfRange(buffer, 0, hlen);
+ matches = Arrays.equals(header, magic);
+ }
+ return matches;
+ }
+
+ /**
+ * Convert an object to a JSON string
+ * @param instance instance to convert
+ * @return a JSON string description
+ * @throws JsonParseException parse problems
+ * @throws JsonMappingException O/J mapping problems
+ */
+ public synchronized String toJson(T instance) throws IOException,
+ JsonGenerationException,
+ JsonMappingException {
+ mapper.configure(SerializationConfig.Feature.INDENT_OUTPUT, true);
+ return mapper.writeValueAsString(instance);
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java
new file mode 100644
index 0000000000..5d8ea3f5b1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java
@@ -0,0 +1,218 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.binding;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.registry.client.exceptions.InvalidPathnameException;
+import org.apache.hadoop.registry.client.impl.zk.RegistryInternalConstants;
+import org.apache.zookeeper.common.PathUtils;
+
+import java.net.IDN;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Pattern;
+
+/**
+ * Basic operations on paths: manipulating them and creating and validating
+ * path elements.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class RegistryPathUtils {
+
+ /**
+ * Compiled down pattern to validate single entries in the path
+ */
+ private static final Pattern PATH_ENTRY_VALIDATION_PATTERN =
+ Pattern.compile(RegistryInternalConstants.VALID_PATH_ENTRY_PATTERN);
+
+ /**
+ * Validate ZK path with the path itself included in
+ * the exception text
+ * @param path path to validate
+ * @return the path parameter
+ * @throws InvalidPathnameException if the pathname is invalid.
+ */
+ public static String validateZKPath(String path) throws
+ InvalidPathnameException {
+ try {
+ PathUtils.validatePath(path);
+
+ } catch (IllegalArgumentException e) {
+ throw new InvalidPathnameException(path,
+ "Invalid Path \"" + path + "\" : " + e, e);
+ }
+ return path;
+ }
+
+ /**
+ * Validate ZK path as valid for a DNS hostname.
+ * @param path path to validate
+ * @return the path parameter
+ * @throws InvalidPathnameException if the pathname is invalid.
+ */
+ public static String validateElementsAsDNS(String path) throws
+ InvalidPathnameException {
+ List<String> splitpath = split(path);
+ for (String fragment : splitpath) {
+ if (!PATH_ENTRY_VALIDATION_PATTERN.matcher(fragment).matches()) {
+ throw new InvalidPathnameException(path,
+ "Invalid Path element \"" + fragment + "\"");
+ }
+ }
+ return path;
+ }
+
+ /**
+ * Create a full path from the registry root and the supplied subdir
+ * @param base registry base
+ * @param path path of operation
+ * @return an absolute path
+ * @throws InvalidPathnameException if the path is invalid
+ */
+ public static String createFullPath(String base, String path) throws
+ InvalidPathnameException {
+ Preconditions.checkArgument(path != null, "null path");
+ Preconditions.checkArgument(base != null, "null base");
+ return validateZKPath(join(base, path));
+ }
+
+ /**
+ * Join two paths, guaranteeing that there will be exactly
+ * one separator between the two, and exactly one at the front
+ * of the path. There will be no trailing "/" except for the special
+ * case that this is the root path
+ * @param base base path
+ * @param path second path to add
+ * @return a combined path.
+ */
+ public static String join(String base, String path) {
+ Preconditions.checkArgument(path != null, "null path");
+ Preconditions.checkArgument(base != null, "null base");
+ StringBuilder fullpath = new StringBuilder();
+
+ if (!base.startsWith("/")) {
+ fullpath.append('/');
+ }
+ fullpath.append(base);
+
+ // guarantee a trailing /
+ if (!fullpath.toString().endsWith("/")) {
+ fullpath.append("/");
+ }
+ // strip off any at the beginning
+ if (path.startsWith("/")) {
+ // path starts with /, so append all other characters -if present
+ if (path.length() > 1) {
+ fullpath.append(path.substring(1));
+ }
+ } else {
+ fullpath.append(path);
+ }
+
+ //here there may be a trailing "/"
+ String finalpath = fullpath.toString();
+ if (finalpath.endsWith("/") && !"/".equals(finalpath)) {
+ finalpath = finalpath.substring(0, finalpath.length() - 1);
+
+ }
+ return finalpath;
+ }
+
+ /**
+ * split a path into elements, stripping empty elements
+ * @param path the path
+ * @return the split path
+ */
+ public static List<String> split(String path) {
+ String[] pathelements = path.split("/");
+ List<String> dirs = new ArrayList<String>(pathelements.length);
+ for (String pathelement : pathelements) {
+ if (!pathelement.isEmpty()) {
+ dirs.add(pathelement);
+ }
+ }
+ return dirs;
+ }
+
+ /**
+ * Get the last entry in a path; for an empty path
+ * returns "". The split logic is that of
+ * {@link #split(String)}
+ * @param path path of operation
+ * @return the last path entry or "" if none.
+ */
+ public static String lastPathEntry(String path) {
+ List<String> splits = split(path);
+ if (splits.isEmpty()) {
+ // empty path. Return ""
+ return "";
+ } else {
+ return splits.get(splits.size() - 1);
+ }
+ }
+
+ /**
+ * Get the parent of a path
+ * @param path path to look at
+ * @return the parent path
+ * @throws PathNotFoundException if the path was at root.
+ */
+ public static String parentOf(String path) throws PathNotFoundException {
+ List<String> elements = split(path);
+
+ int size = elements.size();
+ if (size == 0) {
+ throw new PathNotFoundException("No parent of " + path);
+ }
+ if (size == 1) {
+ return "/";
+ }
+ elements.remove(size - 1);
+ StringBuilder parent = new StringBuilder(path.length());
+ for (String element : elements) {
+ parent.append("/");
+ parent.append(element);
+ }
+ return parent.toString();
+ }
+
+ /**
+ * Perform any formatting for the registry needed to convert
+ * non-simple-DNS elements
+ * @param element element to encode
+ * @return an encoded string
+ */
+ public static String encodeForRegistry(String element) {
+ return IDN.toASCII(element);
+ }
+
+ /**
+ * Perform whatever transforms are needed to get a YARN ID into
+ * a DNS-compatible name
+ * @param yarnId ID as string of YARN application, instance or container
+ * @return a string suitable for use in registry paths.
+ */
+ public static String encodeYarnID(String yarnId) {
+ return yarnId.replace("_", "-");
+ }
+}
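
As a quick illustration of the join/split semantics defined above, a short sketch with an illustrative user path; the comments show the output expected under the rules documented in join():

    import org.apache.hadoop.registry.client.binding.RegistryPathUtils;

    public class PathExample {
      public static void main(String[] args) throws Exception {
        // exactly one "/" between segments; the trailing "/" is stripped
        String p = RegistryPathUtils.join("/users", "alice/services/");
        System.out.println(p);                                   // /users/alice/services
        System.out.println(RegistryPathUtils.split(p));          // [users, alice, services]
        System.out.println(RegistryPathUtils.lastPathEntry(p));  // services
        System.out.println(RegistryPathUtils.parentOf(p));       // /users/alice
        // YARN IDs contain "_", which is not DNS-valid, so it maps to "-"
        System.out.println(
            RegistryPathUtils.encodeYarnID("application_1414052463672_0028"));
      }
    }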
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryTypeUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryTypeUtils.java
new file mode 100644
index 0000000000..b4254a3beb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryTypeUtils.java
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.binding;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
+import org.apache.hadoop.registry.client.types.AddressTypes;
+import org.apache.hadoop.registry.client.types.Endpoint;
+import org.apache.hadoop.registry.client.types.ProtocolTypes;
+
+import java.net.InetSocketAddress;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Static methods to work with registry types, primarily endpoints and the
+ * list representation of addresses.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class RegistryTypeUtils {
+
+ /**
+ * Create a URL endpoint from a list of URIs
+ * @param api implemented API
+ * @param protocolType protocol type
+ * @param uris URIs
+ * @return a new endpoint
+ */
+ public static Endpoint urlEndpoint(String api,
+ String protocolType,
+ URI... uris) {
+ return new Endpoint(api, protocolType, uris);
+ }
+
+ /**
+ * Create a REST endpoint from a list of URIs
+ * @param api implemented API
+ * @param uris URIs
+ * @return a new endpoint
+ */
+ public static Endpoint restEndpoint(String api,
+ URI... uris) {
+ return urlEndpoint(api, ProtocolTypes.PROTOCOL_REST, uris);
+ }
+
+ /**
+ * Create a Web UI endpoint from a list of URIs
+ * @param api implemented API
+ * @param uris URIs
+ * @return a new endpoint
+ */
+ public static Endpoint webEndpoint(String api,
+ URI... uris) {
+ return urlEndpoint(api, ProtocolTypes.PROTOCOL_WEBUI, uris);
+ }
+
+ /**
+ * Create an internet address endpoint from a list of URIs
+ * @param api implemented API
+ * @param protocolType protocol type
+ * @param hostname hostname/FQDN
+ * @param port port
+ * @return a new endpoint
+ */
+ public static Endpoint inetAddrEndpoint(String api,
+ String protocolType,
+ String hostname,
+ int port) {
+ Preconditions.checkArgument(api != null, "null API");
+ Preconditions.checkArgument(protocolType != null, "null protocolType");
+ Preconditions.checkArgument(hostname != null, "null hostname");
+ return new Endpoint(api,
+ AddressTypes.ADDRESS_HOSTNAME_AND_PORT,
+ protocolType,
+ tuplelist(hostname, Integer.toString(port)));
+ }
+
+ /**
+ * Create an IPC endpoint
+ * @param api API
+ * @param protobuf flag to indicate whether or not the IPC uses protocol
+ * buffers
+ * @param address the address as a tuple of (hostname, port)
+ * @return the new endpoint
+ */
+ public static Endpoint ipcEndpoint(String api,
+ boolean protobuf, List<String> address) {
+ ArrayList<List<String>> addressList = new ArrayList<List<String>>();
+ if (address != null) {
+ addressList.add(address);
+ }
+ return new Endpoint(api,
+ AddressTypes.ADDRESS_HOSTNAME_AND_PORT,
+ protobuf ? ProtocolTypes.PROTOCOL_HADOOP_IPC_PROTOBUF
+ : ProtocolTypes.PROTOCOL_HADOOP_IPC,
+ addressList);
+ }
+
+ /**
+ * Create a single-element list of tuples from the input;
+ * that is, an input ("a","b","c") is converted into a list
+ * in the form [["a","b","c"]]
+ * @param t1 tuple elements
+ * @return a list containing a single tuple
+ */
+ public static List<List<String>> tuplelist(String... t1) {
+ List<List<String>> outer = new ArrayList<List<String>>();
+ outer.add(tuple(t1));
+ return outer;
+ }
+
+ /**
+ * Create a tuple from the input;
+ * that is, an input ("a","b","c") is converted into a list
+ * in the form ["a","b","c"]
+ * @param t1 tuple elements
+ * @return a single tuple as a list
+ */
+ public static List<String> tuple(String... t1) {
+ return Arrays.asList(t1);
+ }
+
+ /**
+ * Create a tuple from the input, converting all elements to Strings;
+ * that is, an input ("a", 7, true) is converted into a list
+ * in the form ["a","7","true"]
+ * @param t1 tuple elements
+ * @return a single tuple as a list
+ */
+ public static List<String> tuple(Object... t1) {
+ List<String> l = new ArrayList<String>(t1.length);
+ for (Object t : t1) {
+ l.add(t.toString());
+ }
+ return l;
+ }
+
+ /**
+ * Convert a socket address pair into a string tuple, (host, port).
+ * TODO JDK7: move to InetAddress.getHostString() to avoid DNS lookups.
+ * @param address an address
+ * @return an element for the address list
+ */
+ public static List<String> marshall(InetSocketAddress address) {
+ return tuple(address.getHostName(), address.getPort());
+ }
+
+ /**
+ * Require a specific address type on an endpoint
+ * @param required required type
+ * @param epr endpoint
+ * @throws InvalidRecordException if the type is wrong
+ */
+ public static void requireAddressType(String required, Endpoint epr) throws
+ InvalidRecordException {
+ if (!required.equals(epr.addressType)) {
+ throw new InvalidRecordException(
+ epr.toString(),
+ "Address type of " + epr.addressType
+ + " does not match required type of "
+ + required);
+ }
+ }
+
+ /**
+ * Get the addresses of a URI-type endpoint
+ * @param epr endpoint
+ * @return the URIs of all entries in the address list; null if the endpoint
+ * itself is null
+ * @throws InvalidRecordException if the type is wrong, there are no addresses
+ * or the payload ill-formatted
+ */
+ public static List<String> retrieveAddressesUriType(Endpoint epr)
+ throws InvalidRecordException {
+ if (epr == null) {
+ return null;
+ }
+ requireAddressType(AddressTypes.ADDRESS_URI, epr);
+ List<List<String>> addresses = epr.addresses;
+ if (addresses.size() < 1) {
+ throw new InvalidRecordException(epr.toString(),
+ "No addresses in endpoint");
+ }
+ List<String> results = new ArrayList<String>(addresses.size());
+ for (List<String> address : addresses) {
+ if (address.size() != 1) {
+ throw new InvalidRecordException(epr.toString(),
+ "Address payload invalid: wrong element count: " +
+ address.size());
+ }
+ results.add(address.get(0));
+ }
+ return results;
+ }
+
+ /**
+ * Get the address URLs. Guaranteed to return at least one address.
+ * @param epr endpoint
+ * @return the address as a URL
+ * @throws InvalidRecordException if the type is wrong, there are no addresses
+ * or the payload ill-formatted
+ * @throws MalformedURLException address can't be turned into a URL
+ */
+ public static List<URL> retrieveAddressURLs(Endpoint epr)
+ throws InvalidRecordException, MalformedURLException {
+ if (epr == null) {
+ throw new InvalidRecordException("", "Null endpoint");
+ }
+ List<String> addresses = retrieveAddressesUriType(epr);
+ List<URL> results = new ArrayList<URL>(addresses.size());
+ for (String address : addresses) {
+ results.add(new URL(address));
+ }
+ return results;
+ }
+}
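
A sketch of building and reading endpoints with these helpers. The API name org.example.SomeProtocol and the host and port values are illustrative, and the URI varargs constructor of Endpoint is assumed to produce an ADDRESS_URI endpoint as the helpers above imply:

    import java.net.URI;
    import java.util.List;
    import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;
    import org.apache.hadoop.registry.client.types.Endpoint;

    public class EndpointExample {
      public static void main(String[] args) throws Exception {
        // URI-type endpoint: each address is a single-element tuple
        Endpoint web = RegistryTypeUtils.webEndpoint(
            "UI", new URI("http://host1:8080"));
        List<String> uris = RegistryTypeUtils.retrieveAddressesUriType(web);
        System.out.println(uris);           // [http://host1:8080]

        // host:port endpoint for a protobuf IPC API;
        // tuple(Object...) stringifies the port
        Endpoint ipc = RegistryTypeUtils.ipcEndpoint(
            "org.example.SomeProtocol", true,
            RegistryTypeUtils.tuple("host1", 8032));
        System.out.println(ipc.addresses);  // [[host1, 8032]]
      }
    }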
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java
new file mode 100644
index 0000000000..3b28a02a64
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java
@@ -0,0 +1,362 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.binding;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.registry.client.api.RegistryConstants;
+import org.apache.hadoop.registry.client.api.RegistryOperations;
+import org.apache.hadoop.registry.client.exceptions.InvalidPathnameException;
+import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
+import org.apache.hadoop.registry.client.exceptions.NoRecordException;
+import org.apache.hadoop.registry.client.impl.zk.RegistryInternalConstants;
+import org.apache.hadoop.registry.client.types.RegistryPathStatus;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.hadoop.registry.client.types.ServiceRecordHeader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.hadoop.registry.client.binding.RegistryPathUtils.*;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Utility methods for working with a registry.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class RegistryUtils {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(RegistryUtils.class);
+
+ /**
+ * Build the user path; switches to the system path if the user is "".
+ * It also cross-converts the username to ASCII via punycode
+ * @param shortname username or ""
+ * @return the path to the user
+ */
+ public static String homePathForUser(String shortname) {
+ Preconditions.checkArgument(shortname != null, "null user");
+
+ // catch recursion
+ if (shortname.startsWith(RegistryConstants.PATH_USERS)) {
+ return shortname;
+ }
+ if (shortname.isEmpty()) {
+ return RegistryConstants.PATH_SYSTEM_SERVICES;
+ }
+ return RegistryPathUtils.join(RegistryConstants.PATH_USERS,
+ encodeForRegistry(shortname));
+ }
+
+ /**
+ * Create the path under which all services of a given
+ * service class are registered
+ * @param user username or ""
+ * @param serviceClass service class
+ * @return a full path
+ */
+ public static String serviceclassPath(String user,
+ String serviceClass) {
+ String services = join(homePathForUser(user),
+ RegistryConstants.PATH_USER_SERVICES);
+ return join(services,
+ serviceClass);
+ }
+
+ /**
+ * Create a path to a service under a user & service class
+ * @param user username or ""
+ * @param serviceClass service class
+ * @param serviceName service name unique for that user & service class
+ * @return a full path
+ */
+ public static String servicePath(String user,
+ String serviceClass,
+ String serviceName) {
+
+ return join(
+ serviceclassPath(user, serviceClass),
+ serviceName);
+ }
+
+ /**
+ * Create a path for listing components under a service
+ * @param user username or ""
+ * @param serviceClass service class
+ * @param serviceName service name unique for that user & service class
+ * @return a full path
+ */
+ public static String componentListPath(String user,
+ String serviceClass, String serviceName) {
+
+ return join(servicePath(user, serviceClass, serviceName),
+ RegistryConstants.SUBPATH_COMPONENTS);
+ }
+
+ /**
+ * Create the path to a service record for a component
+ * @param user username or ""
+ * @param serviceClass service class
+ * @param serviceName service name unique for that user & service class
+ * @param componentName unique name/ID of the component
+ * @return a full path
+ */
+ public static String componentPath(String user,
+ String serviceClass, String serviceName, String componentName) {
+
+ return join(
+ componentListPath(user, serviceClass, serviceName),
+ componentName);
+ }
+
+ /**
+ * List service records directly under a path
+ * @param registryOperations registry operations instance
+ * @param path path to list
+ * @return a mapping of the service records that were resolved, indexed
+ * by their full path
+ * @throws IOException on any failure
+ */
+ public static Map<String, ServiceRecord> listServiceRecords(
+ RegistryOperations registryOperations,
+ String path) throws IOException {
+ Map<String, RegistryPathStatus> children =
+ statChildren(registryOperations, path);
+ return extractServiceRecords(registryOperations,
+ path,
+ children.values());
+ }
+
+ /**
+ * List children of a directory and retrieve their
+ * {@link RegistryPathStatus} values.
+ *
+ * This is not an atomic operation; a child may be deleted
+ * during the iteration through the child entries. If this happens,
+ * the PathNotFoundException is caught and that child
+ * entry omitted.
+ *
+ * @param registryOperations registry operations instance
+ * @param path path
+ * @return a possibly empty map of child entries listed by
+ * their short name.
+ * @throws PathNotFoundException path is not in the registry.
+ * @throws InvalidPathnameException the path is invalid.
+ * @throws IOException Any other IO Exception
+ */
+ public static Map<String, RegistryPathStatus> statChildren(
+ RegistryOperations registryOperations,
+ String path)
+ throws PathNotFoundException,
+ InvalidPathnameException,
+ IOException {
+ List<String> childNames = registryOperations.list(path);
+ Map<String, RegistryPathStatus> results =
+ new HashMap<String, RegistryPathStatus>();
+ for (String childName : childNames) {
+ String child = join(path, childName);
+ try {
+ RegistryPathStatus stat = registryOperations.stat(child);
+ results.put(childName, stat);
+ } catch (PathNotFoundException pnfe) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("stat failed on {}: moved? {}", child, pnfe, pnfe);
+ }
+ // and continue
+ }
+ }
+ return results;
+ }
+
+ /**
+ * Get the home path of the current user.
+ *
+ * In an insecure cluster, the environment variable
+ * HADOOP_USER_NAME is queried first.
+ *
+ * This means that in a YARN container where the creator set this
+ * environment variable to propagate their identity, the defined
+ * user name is used in preference to the actual user.
+ *
+ * In a secure cluster, the kerberos identity of the current user is used.
+ * @return a path for the current user's home dir.
+ * @throws RuntimeException if the current user identity cannot be determined
+ * from the OS/kerberos.
+ */
+ public static String homePathForCurrentUser() {
+ String shortUserName = currentUsernameUnencoded();
+ return homePathForUser(shortUserName);
+ }
+
+ /**
+ * Get the current username, before any encoding has been applied.
+ * @return the current user from the kerberos identity, falling back
+ * to the user and/or env variables.
+ */
+ private static String currentUsernameUnencoded() {
+ String env_hadoop_username = System.getenv(
+ RegistryInternalConstants.HADOOP_USER_NAME);
+ return getCurrentUsernameUnencoded(env_hadoop_username);
+ }
+
+ /**
+ * Get the current username, using the value of the parameter
+ * env_hadoop_username if it is set on an insecure cluster.
+ * This ensures that the username propagates correctly across processes
+ * started by YARN.
+ *
+ * This method is primarily made visible for testing.
+ * @param env_hadoop_username the environment variable
+ * @return the selected username
+ * @throws RuntimeException if there is a problem getting the short user
+ * name of the current user.
+ */
+ @VisibleForTesting
+ public static String getCurrentUsernameUnencoded(String env_hadoop_username) {
+ String shortUserName = null;
+ if (!UserGroupInformation.isSecurityEnabled()) {
+ shortUserName = env_hadoop_username;
+ }
+ if (StringUtils.isEmpty(shortUserName)) {
+ try {
+ shortUserName = UserGroupInformation.getCurrentUser().getShortUserName();
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ return shortUserName;
+ }
+
+ /**
+ * Get the current user path formatted for the registry
+ *
+ * In an insecure cluster, the environment variable
+ * HADOOP_USER_NAME is queried first.
+ *
+ * This means that in a YARN container where the creator set this
+ * environment variable to propagate their identity, the defined
+ * user name is used in preference to the actual user.
+ *
+ * In a secure cluster, the kerberos identity of the current user is used.
+ * @return the encoded shortname of the current user
+ * @throws RuntimeException if the current user identity cannot be determined
+ * from the OS/kerberos.
+ *
+ */
+ public static String currentUser() {
+ String shortUserName = currentUsernameUnencoded();
+ return encodeForRegistry(shortUserName);
+ }
+
+ /**
+ * Extract all service records under a list of stat operations; this
+ * skips entries that are too short or simply not matching
+ * @param operations operation support for fetches
+ * @param parentpath path of the parent of all the entries
+ * @param stats Collection of stat results
+ * @return a possibly empty map of fullpath:record.
+ * @throws IOException for any IO Operation that wasn't ignored.
+ */
+ public static Map<String, ServiceRecord> extractServiceRecords(
+ RegistryOperations operations,
+ String parentpath,
+ Collection<RegistryPathStatus> stats) throws IOException {
+ Map<String, ServiceRecord> results =
+ new HashMap<String, ServiceRecord>(stats.size());
+ for (RegistryPathStatus stat : stats) {
+ if (stat.size > ServiceRecordHeader.getLength()) {
+ // maybe has data
+ String path = join(parentpath, stat.path);
+ try {
+ ServiceRecord serviceRecord = operations.resolve(path);
+ results.put(path, serviceRecord);
+ } catch (EOFException ignored) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("data too short for {}", path);
+ }
+ } catch (InvalidRecordException record) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Invalid record at {}", path);
+ }
+ } catch (NoRecordException record) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("No record at {}", path);
+ }
+ }
+ }
+ }
+ return results;
+ }
+
+ /**
+ * Extract all service records under a list of stat operations; this
+ * non-atomic action skips entries that are too short or simply not matching.
+ *
+ * @param operations operation support for fetches
+ * @param parentpath path of the parent of all the entries
+ * @param stats a map of name:value mappings.
+ * @return a possibly empty map of fullpath:record.
+ * @throws IOException for any IO Operation that wasn't ignored.
+ */
+ public static Map<String, ServiceRecord> extractServiceRecords(
+ RegistryOperations operations,
+ String parentpath,
+ Map<String, RegistryPathStatus> stats) throws IOException {
+ return extractServiceRecords(operations, parentpath, stats.values());
+ }
+
+ /**
+ * Extract all service records under a path; this
+ * non-atomic action skips entries that are too short or simply not matching.
+ *
+ * @param operations operation support for fetches
+ * @param parentpath path of the parent of all the entries
+ * @return a possibly empty map of fullpath:record.
+ * @throws IOException for any IO Operation that wasn't ignored.
+ */
+ public static Map<String, ServiceRecord> extractServiceRecords(
+ RegistryOperations operations,
+ String parentpath) throws IOException {
+ return
+ extractServiceRecords(operations,
+ parentpath,
+ statChildren(operations, parentpath).values());
+ }
+
+ /**
+ * Static instance of service record marshalling
+ */
+ public static class ServiceRecordMarshal extends JsonSerDeser<ServiceRecord> {
+ public ServiceRecordMarshal() {
+ super(ServiceRecord.class, ServiceRecordHeader.getData());
+ }
+ }
+}
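
A sketch tying the path builders and listing helpers together; the username alice and service class org-apache-example are hypothetical, and operations is any started RegistryOperations instance:

    import java.util.Map;
    import org.apache.hadoop.registry.client.api.RegistryOperations;
    import org.apache.hadoop.registry.client.binding.RegistryUtils;
    import org.apache.hadoop.registry.client.types.ServiceRecord;

    public class ListingExample {
      static void dumpServices(RegistryOperations operations) throws Exception {
        // builds a path of the form /users/alice/services/org-apache-example
        String path = RegistryUtils.serviceclassPath("alice", "org-apache-example");
        Map<String, ServiceRecord> records =
            RegistryUtils.listServiceRecords(operations, path);
        for (Map.Entry<String, ServiceRecord> entry : records.entrySet()) {
          System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
      }
    }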
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/package-info.java
new file mode 100644
index 0000000000..f99aa71c8d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Registry binding utility classes.
+ */
+package org.apache.hadoop.registry.client.binding;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/AuthenticationFailedException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/AuthenticationFailedException.java
new file mode 100644
index 0000000000..aadb7fc46d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/AuthenticationFailedException.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.exceptions;
+
+/**
+ * Exception raised when client access wasn't authenticated.
+ * That is: the credentials provided were incomplete or invalid.
+ */
+public class AuthenticationFailedException extends RegistryIOException {
+ public AuthenticationFailedException(String path, Throwable cause) {
+ super(path, cause);
+ }
+
+ public AuthenticationFailedException(String path, String error) {
+ super(path, error);
+ }
+
+ public AuthenticationFailedException(String path,
+ String error,
+ Throwable cause) {
+ super(path, error, cause);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/InvalidPathnameException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/InvalidPathnameException.java
new file mode 100644
index 0000000000..c984f41059
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/InvalidPathnameException.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.exceptions;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * A path name was invalid. This is raised when a path string has
+ * characters in it that are not permitted.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class InvalidPathnameException extends RegistryIOException {
+ public InvalidPathnameException(String path, String message) {
+ super(path, message);
+ }
+
+ public InvalidPathnameException(String path,
+ String message,
+ Throwable cause) {
+ super(path, message, cause);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/InvalidRecordException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/InvalidRecordException.java
new file mode 100644
index 0000000000..e4f545e5b4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/InvalidRecordException.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.exceptions;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Raised if an attempt to parse a record failed.
+ *
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class InvalidRecordException extends RegistryIOException {
+
+ public InvalidRecordException(String path, String error) {
+ super(path, error);
+ }
+
+ public InvalidRecordException(String path,
+ String error,
+ Throwable cause) {
+ super(path, error, cause);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/NoChildrenForEphemeralsException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/NoChildrenForEphemeralsException.java
new file mode 100644
index 0000000000..24070a5a37
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/NoChildrenForEphemeralsException.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.exceptions;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This is a manifestation of the Zookeeper restrictions about
+ * what nodes may act as parents.
+ *
+ * Children are not allowed under ephemeral nodes. This is an aspect
+ * of ZK which isn't directly exposed to the registry API. It may
+ * surface if the registry is manipulated outside of the registry API.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class NoChildrenForEphemeralsException extends RegistryIOException {
+ public NoChildrenForEphemeralsException(String path, Throwable cause) {
+ super(path, cause);
+ }
+
+ public NoChildrenForEphemeralsException(String path, String error) {
+ super(path, error);
+ }
+
+ public NoChildrenForEphemeralsException(String path,
+ String error,
+ Throwable cause) {
+ super(path, error, cause);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/NoPathPermissionsException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/NoPathPermissionsException.java
new file mode 100644
index 0000000000..ce84f5b610
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/NoPathPermissionsException.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.exceptions;
+
+import org.apache.hadoop.fs.PathIOException;
+
+/**
+ * Raised on path permission exceptions.
+ *
+ * This is similar to PathIOException, but signals specifically that the
+ * caller lacks the permissions needed for the operation on that path.
+ */
+public class NoPathPermissionsException extends RegistryIOException {
+ public NoPathPermissionsException(String path, Throwable cause) {
+ super(path, cause);
+ }
+
+ public NoPathPermissionsException(String path, String error) {
+ super(path, error);
+ }
+
+ public NoPathPermissionsException(String path, String error, Throwable cause) {
+ super(path, error, cause);
+ }
+
+ public NoPathPermissionsException(String message,
+ PathIOException cause) {
+ super(message, cause);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/NoRecordException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/NoRecordException.java
new file mode 100644
index 0000000000..160433f081
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/NoRecordException.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.exceptions;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.hadoop.registry.client.types.ServiceRecordHeader;
+
+/**
+ * Raised if there is no {@link ServiceRecord} resolved at the end
+ * of the specified path, for reasons such as:
+ * <ul>
+ *   <li>There wasn't enough data to contain a Service Record.</li>
+ *   <li>The start of the data did not match the {@link ServiceRecordHeader}
+ *   header.</li>
+ * </ul>
+ *
+ * There may be valid data of some form at the end of the path, but it does
+ * not appear to be a Service Record.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class NoRecordException extends RegistryIOException {
+
+ public NoRecordException(String path, String error) {
+ super(path, error);
+ }
+
+ public NoRecordException(String path,
+ String error,
+ Throwable cause) {
+ super(path, error, cause);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/RegistryIOException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/RegistryIOException.java
new file mode 100644
index 0000000000..ca966dba05
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/RegistryIOException.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.exceptions;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.PathIOException;
+
+/**
+ * Base exception for registry operations.
+ *
+ * These exceptions include the path of the failing operation wherever possible;
+ * this can be retrieved via {@link PathIOException#getPath()}.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class RegistryIOException extends PathIOException {
+
+ /**
+ * Build an exception from any other Path IO Exception.
+ * This propagates the path of the original exception
+ * @param message more specific text
+ * @param cause cause
+ */
+ public RegistryIOException(String message, PathIOException cause) {
+ super(cause.getPath() != null ? cause.getPath().toString() : "",
+ message,
+ cause);
+ }
+
+ public RegistryIOException(String path, Throwable cause) {
+ super(path, cause);
+ }
+
+ public RegistryIOException(String path, String error) {
+ super(path, error);
+ }
+
+ public RegistryIOException(String path, String error, Throwable cause) {
+ super(path, error, cause);
+ }
+}
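
Because every exception in this hierarchy extends PathIOException, a caller can recover the failing path without matching on each subclass; a minimal sketch:

    import java.io.IOException;
    import org.apache.hadoop.registry.client.api.RegistryOperations;
    import org.apache.hadoop.registry.client.exceptions.RegistryIOException;

    public class ErrorHandlingExample {
      static void resolveQuietly(RegistryOperations operations, String path) {
        try {
          System.out.println(operations.resolve(path));
        } catch (RegistryIOException e) {
          // the failing path travels with the exception
          System.err.println("Registry failure at " + e.getPath() + ": " + e);
        } catch (IOException e) {
          System.err.println("I/O failure: " + e);
        }
      }
    }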
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/package-info.java
new file mode 100644
index 0000000000..7d9c8ade83
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/package-info.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Registry Service Exceptions
+ *
+ * These are the Registry-specific exceptions that may be raised during
+ * Registry operations.
+ *
+ * Other exceptions may be raised, especially IOExceptions
+ * triggered by network problems, and IllegalArgumentException
+ * exceptions that may be raised if invalid (often null) arguments are passed
+ * to a method call.
+ *
+ * All exceptions in this package are derived from
+ * {@link org.apache.hadoop.registry.client.exceptions.RegistryIOException}
+ */
+package org.apache.hadoop.registry.client.exceptions;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/RegistryOperationsClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/RegistryOperationsClient.java
new file mode 100644
index 0000000000..db03936786
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/RegistryOperationsClient.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.impl;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.registry.client.impl.zk.RegistryBindingSource;
+import org.apache.hadoop.registry.client.impl.zk.RegistryOperationsService;
+
+
+/**
+ * This is the client service for applications to work with the registry.
+ *
+ * It does not set up the root paths for the registry; it is bonded
+ * to a user, and can be set to use SASL, anonymous or id:pass auth.
+ *
+ * For SASL, the client must be operating in the context of an authenticated user.
+ *
+ * For id:pass, the client must have the relevant id and password; SASL is
+ * not used even if the client has credentials.
+ *
+ * For anonymous, nothing is used.
+ *
+ * Any SASL-authed client also has the ability to add one or more authentication
+ * id:pass pairs on all future writes, and to reset them later.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class RegistryOperationsClient extends RegistryOperationsService {
+
+ public RegistryOperationsClient(String name) {
+ super(name);
+ }
+
+ public RegistryOperationsClient(String name,
+ RegistryBindingSource bindingSource) {
+ super(name, bindingSource);
+ }
+}
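
A sketch of the client lifecycle, assuming a ZooKeeper ensemble on localhost:2181; hadoop.registry.zk.quorum is the quorum key this patch adds to yarn-default.xml:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.registry.client.impl.RegistryOperationsClient;

    public class ClientExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("hadoop.registry.zk.quorum", "localhost:2181");
        RegistryOperationsClient client = new RegistryOperationsClient("example");
        client.init(conf);   // standard YARN service lifecycle
        client.start();
        try {
          System.out.println(client.list("/"));  // top-level registry paths
        } finally {
          client.stop();
        }
      }
    }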
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/package-info.java
new file mode 100644
index 0000000000..d85b6a7dfa
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/package-info.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Registry client services
+ *
+ * These are classes which follow the YARN lifecycle and which implement
+ * the {@link org.apache.hadoop.registry.client.api.RegistryOperations}
+ * API.
+ */
+package org.apache.hadoop.registry.client.impl;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/BindingInformation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/BindingInformation.java
new file mode 100644
index 0000000000..8ae003d548
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/BindingInformation.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.impl.zk;
+
+import org.apache.curator.ensemble.EnsembleProvider;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Binding information provided by a {@link RegistryBindingSource}
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class BindingInformation {
+
+ /**
+ * The Curator Ensemble Provider
+ */
+ public EnsembleProvider ensembleProvider;
+
+ /**
+ * Any information that may be useful for diagnostics
+ */
+ public String description;
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java
new file mode 100644
index 0000000000..a0e6365386
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java
@@ -0,0 +1,769 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.impl.zk;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.curator.ensemble.EnsembleProvider;
+import org.apache.curator.ensemble.fixed.FixedEnsembleProvider;
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.framework.api.BackgroundCallback;
+import org.apache.curator.framework.api.CreateBuilder;
+import org.apache.curator.framework.api.DeleteBuilder;
+import org.apache.curator.framework.api.GetChildrenBuilder;
+import org.apache.curator.retry.BoundedExponentialBackoffRetry;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.service.CompositeService;
+import org.apache.hadoop.service.ServiceStateException;
+import org.apache.hadoop.registry.client.api.RegistryConstants;
+import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
+import org.apache.hadoop.registry.client.exceptions.AuthenticationFailedException;
+import org.apache.hadoop.registry.client.exceptions.NoChildrenForEphemeralsException;
+import org.apache.hadoop.registry.client.exceptions.NoPathPermissionsException;
+import org.apache.hadoop.registry.client.exceptions.RegistryIOException;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.data.ACL;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * This service binds to Zookeeper via Apache Curator. It is more
+ * generic than just the YARN service registry; it does not implement
+ * any of the Registry Operations API.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class CuratorService extends CompositeService
+ implements RegistryConstants, RegistryBindingSource {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(CuratorService.class);
+
+ /**
+ * the Curator binding
+ */
+ private CuratorFramework curator;
+
+ /**
+ * Path to the registry root
+ */
+ private String registryRoot;
+
+ /**
+ * Supplied binding source. This defaults to being this
+ * service itself.
+ */
+ private final RegistryBindingSource bindingSource;
+
+ /**
+ * Security service
+ */
+ private RegistrySecurity registrySecurity;
+
+ /**
+ * the connection binding text for messages
+ */
+ private String connectionDescription;
+
+ /**
+ * Security connection diagnostics
+ */
+ private String securityConnectionDiagnostics = "";
+
+ /**
+ * Provider of curator "ensemble"; offers a basis for
+ * more flexible binding in future.
+ */
+ private EnsembleProvider ensembleProvider;
+
+ /**
+ * Construct the service.
+ * @param name service name
+ * @param bindingSource source of binding information.
+ * If null: use this instance
+ */
+ public CuratorService(String name, RegistryBindingSource bindingSource) {
+ super(name);
+ if (bindingSource != null) {
+ this.bindingSource = bindingSource;
+ } else {
+ this.bindingSource = this;
+ }
+ }
+
+ /**
+ * Create an instance using this service as the binding source (i.e. take
+ * the ZK binding information from the service configuration)
+ * @param name service name
+ */
+ public CuratorService(String name) {
+ this(name, null);
+ }
+
+ /**
+ * Init the service.
+ * This is where the security bindings are set up
+ * @param conf configuration of the service
+ * @throws Exception
+ */
+ @Override
+ protected void serviceInit(Configuration conf) throws Exception {
+
+ registryRoot = conf.getTrimmed(KEY_REGISTRY_ZK_ROOT,
+ DEFAULT_ZK_REGISTRY_ROOT);
+
+ // create and add the registry security service
+ registrySecurity = new RegistrySecurity("registry security");
+ addService(registrySecurity);
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Creating Registry with root {}", registryRoot);
+ }
+
+ super.serviceInit(conf);
+ }
+
+ /**
+ * Start the service.
+ * This is where the curator instance is started.
+ * @throws Exception
+ */
+ @Override
+ protected void serviceStart() throws Exception {
+ super.serviceStart();
+
+ // create the curator; rely on the registry security code
+ // to set up the JVM context and curator
+ curator = createCurator();
+ }
+
+ /**
+ * Close the ZK connection if it is open
+ */
+ @Override
+ protected void serviceStop() throws Exception {
+ IOUtils.closeStream(curator);
+ super.serviceStop();
+ }
+
+ /**
+ * Internal check that a service is in the live state
+ * @throws ServiceStateException if not
+ */
+ private void checkServiceLive() throws ServiceStateException {
+ if (!isInState(STATE.STARTED)) {
+ throw new ServiceStateException(
+ "Service " + getName() + " is in wrong state: "
+ + getServiceState());
+ }
+ }
+
+ /**
+ * Flag to indicate whether or not the registry is secure.
+ * Valid once the service is inited.
+ * @return service security policy
+ */
+ public boolean isSecure() {
+ return registrySecurity.isSecureRegistry();
+ }
+
+ /**
+ * Get the registry security helper
+ * @return the registry security helper
+ */
+ protected RegistrySecurity getRegistrySecurity() {
+ return registrySecurity;
+ }
+
+ /**
+ * Build the security diagnostics string
+ * @return a string for diagnostics
+ */
+ protected String buildSecurityDiagnostics() {
+ // build up the security connection diags
+ if (!isSecure()) {
+ return "security disabled";
+ } else {
+ StringBuilder builder = new StringBuilder();
+ builder.append("secure cluster; ");
+ builder.append(registrySecurity.buildSecurityDiagnostics());
+ return builder.toString();
+ }
+ }
+
+ /**
+ * Create a new curator instance off the root path, using the
+ * options provided in the service configuration to set timeouts
+ * and retry policy.
+ * @return the newly created curator
+ */
+ private CuratorFramework createCurator() throws IOException {
+ Configuration conf = getConfig();
+ createEnsembleProvider();
+ int sessionTimeout = conf.getInt(KEY_REGISTRY_ZK_SESSION_TIMEOUT,
+ DEFAULT_ZK_SESSION_TIMEOUT);
+ int connectionTimeout = conf.getInt(KEY_REGISTRY_ZK_CONNECTION_TIMEOUT,
+ DEFAULT_ZK_CONNECTION_TIMEOUT);
+ int retryTimes = conf.getInt(KEY_REGISTRY_ZK_RETRY_TIMES,
+ DEFAULT_ZK_RETRY_TIMES);
+ int retryInterval = conf.getInt(KEY_REGISTRY_ZK_RETRY_INTERVAL,
+ DEFAULT_ZK_RETRY_INTERVAL);
+ int retryCeiling = conf.getInt(KEY_REGISTRY_ZK_RETRY_CEILING,
+ DEFAULT_ZK_RETRY_CEILING);
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Creating CuratorService with connection {}",
+ connectionDescription);
+ }
+ CuratorFramework framework;
+
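+ // class-wide lock: the security setup below mutates JVM-wide system properties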
+ synchronized (CuratorService.class) {
+ // set the security options
+
+ //log them
+ securityConnectionDiagnostics = buildSecurityDiagnostics();
+
+ // build up the curator itself
+ CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder();
+ builder.ensembleProvider(ensembleProvider)
+ .connectionTimeoutMs(connectionTimeout)
+ .sessionTimeoutMs(sessionTimeout)
+ .retryPolicy(new BoundedExponentialBackoffRetry(retryInterval,
+ retryCeiling,
+ retryTimes));
+
+ // set up the builder AND any JVM context
+ registrySecurity.applySecurityEnvironment(builder);
+
+ framework = builder.build();
+ framework.start();
+ }
+
+ return framework;
+ }
+
+ @Override
+ public String toString() {
+ return super.toString()
+ + bindingDiagnosticDetails();
+ }
+
+ /**
+ * Get the binding diagnostics
+ * @return a diagnostics string valid after the service is started.
+ */
+ public String bindingDiagnosticDetails() {
+ return " Connection=\"" + connectionDescription + "\""
+ + " root=\"" + registryRoot + "\""
+ + " " + securityConnectionDiagnostics;
+ }
+
+ /**
+ * Create a full path from the registry root and the supplied subdir
+ * @param path path of operation
+ * @return an absolute path
+ * @throws IllegalArgumentException if the path is invalid
+ */
+ protected String createFullPath(String path) throws IOException {
+ return RegistryPathUtils.createFullPath(registryRoot, path);
+ }
+
+ /**
+ * Get the registry binding source ... this can be used to
+ * create new ensemble providers
+ * @return the registry binding source in use
+ */
+ public RegistryBindingSource getBindingSource() {
+ return bindingSource;
+ }
+
+ /**
+ * Create the ensemble provider for this registry by invoking
+ * {@link RegistryBindingSource#supplyBindingInformation()} on
+ * the provider stored in {@link #bindingSource}.
+ * Sets {@link #ensembleProvider} to that value, and
+ * sets {@link #connectionDescription} to the binding description
+ * for use in toString() and logging.
+ */
+ protected void createEnsembleProvider() {
+ BindingInformation binding = bindingSource.supplyBindingInformation();
+ connectionDescription = binding.description
+ + " " + securityConnectionDiagnostics;
+ ensembleProvider = binding.ensembleProvider;
+ }
+
+ /**
+ * Supply the binding information.
+ * This implementation returns a fixed ensemble bonded to
+ * the quorum supplied by {@link #buildConnectionString()}
+ * @return the binding information
+ */
+ @Override
+ public BindingInformation supplyBindingInformation() {
+ BindingInformation binding = new BindingInformation();
+ String connectString = buildConnectionString();
+ binding.ensembleProvider = new FixedEnsembleProvider(connectString);
+ binding.description =
+ "fixed ZK quorum \"" + connectString + "\"";
+ return binding;
+ }
+
+ /**
+ * Override point: get the connection string used to connect to
+ * the ZK service
+ * @return a registry quorum
+ */
+ protected String buildConnectionString() {
+ return getConfig().getTrimmed(KEY_REGISTRY_ZK_QUORUM,
+ DEFAULT_REGISTRY_ZK_QUORUM);
+ }
+
+ /**
+ * Create an IOE when an operation fails
+ * @param path path of operation
+ * @param operation operation attempted
+ * @param exception the exception caught
+ * @return an IOE to throw that contains the path and operation details.
+ */
+ protected IOException operationFailure(String path,
+ String operation,
+ Exception exception) {
+ return operationFailure(path, operation, exception, null);
+ }
+
+ /**
+ * Create an IOE when an operation fails
+ * @param path path of operation
+ * @param operation operation attempted
+ * @param exception the exception caught
+ * @return an IOE to throw that contains the path and operation details.
+ */
+ protected IOException operationFailure(String path,
+ String operation,
+ Exception exception,
+ List<ACL> acls) {
+ IOException ioe;
+ String aclList = "[" + RegistrySecurity.aclsToString(acls) + "]";
+ if (exception instanceof KeeperException.NoNodeException) {
+ ioe = new PathNotFoundException(path);
+ } else if (exception instanceof KeeperException.NodeExistsException) {
+ ioe = new FileAlreadyExistsException(path);
+ } else if (exception instanceof KeeperException.NoAuthException) {
+ ioe = new NoPathPermissionsException(path,
+ "Not authorized to access path; ACLs: " + aclList);
+ } else if (exception instanceof KeeperException.NotEmptyException) {
+ ioe = new PathIsNotEmptyDirectoryException(path);
+ } else if (exception instanceof KeeperException.AuthFailedException) {
+ ioe = new AuthenticationFailedException(path,
+ "Authentication Failed: " + exception, exception);
+ } else if (exception instanceof KeeperException.NoChildrenForEphemeralsException) {
+ ioe = new NoChildrenForEphemeralsException(path,
+ "Cannot create a path under an ephemeral node: " + exception,
+ exception);
+ } else if (exception instanceof KeeperException.InvalidACLException) {
+ // this is a security exception of a kind
+ // include the ACLs to help the diagnostics
+ StringBuilder builder = new StringBuilder();
+ builder.append("Path access failure ").append(aclList);
+ builder.append(" ");
+ builder.append(securityConnectionDiagnostics);
+ ioe = new NoPathPermissionsException(path, builder.toString());
+ } else {
+ ioe = new RegistryIOException(path,
+ "Failure of " + operation + " on " + path + ": " +
+ exception.toString(),
+ exception);
+ }
+ if (ioe.getCause() == null) {
+ ioe.initCause(exception);
+ }
+ return ioe;
+ }
+
+ /**
+ * Create a path if it does not exist.
+ * The check is poll + create; there's a risk that another process
+ * may create the same path before the create() operation is
+ * executed/propagated to the ZK node polled.
+ *
+ * @param path path to create
+ * @param mode creation mode for the path
+ * @param acl ACL for path - used when creating a new entry
+ * @param createParents flag to trigger parent creation
+ * @return true iff the path was created
+ * @throws IOException on any failure
+ */
+ @VisibleForTesting
+ public boolean maybeCreate(String path,
+ CreateMode mode,
+ List<ACL> acl,
+ boolean createParents) throws IOException {
+ return zkMkPath(path, mode, createParents, acl);
+ }
+
+ /**
+ * Stat the file
+ * @param path path of operation
+ * @return a curator stat entry
+ * @throws IOException on a failure
+ * @throws PathNotFoundException if the path was not found
+ */
+ public Stat zkStat(String path) throws IOException {
+ checkServiceLive();
+ String fullpath = createFullPath(path);
+ Stat stat;
+ try {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Stat {}", fullpath);
+ }
+ stat = curator.checkExists().forPath(fullpath);
+ } catch (Exception e) {
+ throw operationFailure(fullpath, "read()", e);
+ }
+ if (stat == null) {
+ throw new PathNotFoundException(path);
+ }
+ return stat;
+ }
+
+ /**
+ * Get the ACLs of a path
+ * @param path path of operation
+ * @return a possibly empty list of ACLs
+ * @throws IOException
+ */
+ public List<ACL> zkGetACLS(String path) throws IOException {
+ checkServiceLive();
+ String fullpath = createFullPath(path);
+ List<ACL> acls;
+ try {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GetACLS {}", fullpath);
+ }
+ acls = curator.getACL().forPath(fullpath);
+ } catch (Exception e) {
+ throw operationFailure(fullpath, "read()", e);
+ }
+ if (acls == null) {
+ throw new PathNotFoundException(path);
+ }
+ return acls;
+ }
+
+ /**
+ * Probe for a path existing
+ * @param path path of operation
+ * @return true if the path was visible from the ZK server
+ * queried.
+ * @throws IOException on any exception other than
+ * {@link PathNotFoundException}
+ */
+ public boolean zkPathExists(String path) throws IOException {
+ checkServiceLive();
+ try {
+ return zkStat(path) != null;
+ } catch (PathNotFoundException e) {
+ return false;
+ }
+ }
+
+ /**
+ * Verify a path exists
+ * @param path path of operation
+ * @throws PathNotFoundException if the path is absent
+ * @throws IOException
+ */
+ public String zkPathMustExist(String path) throws IOException {
+ zkStat(path);
+ return path;
+ }
+
+ /**
+ * Create a directory. It is not an error if it already exists
+ * @param path path to create
+ * @param mode mode for path
+ * @param createParents flag to trigger parent creation
+ * @param acls ACL for path
+ * @return true if the path was created, false if it already existed
+ * @throws IOException any problem other than the path existing
+ */
+ public boolean zkMkPath(String path,
+ CreateMode mode,
+ boolean createParents,
+ List<ACL> acls)
+ throws IOException {
+ checkServiceLive();
+ path = createFullPath(path);
+ if (acls == null || acls.isEmpty()) {
+ throw new NoPathPermissionsException(path, "Empty ACL list");
+ }
+
+ try {
+ RegistrySecurity.AclListInfo aclInfo =
+ new RegistrySecurity.AclListInfo(acls);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Creating path {} with mode {} and ACL {}",
+ path, mode, aclInfo);
+ }
+ CreateBuilder createBuilder = curator.create();
+ createBuilder.withMode(mode).withACL(acls);
+ if (createParents) {
+ createBuilder.creatingParentsIfNeeded();
+ }
+ createBuilder.forPath(path);
+
+ } catch (KeeperException.NodeExistsException e) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("path already present: {}", path, e);
+ }
+ return false;
+ } catch (Exception e) {
+ throw operationFailure(path, "mkdir() ", e, acls);
+ }
+ return true;
+ }
+
+ /**
+ * Recursively make a path
+ * @param path path to create
+ * @param acl ACL for path
+ * @throws IOException any problem
+ */
+ public void zkMkParentPath(String path,
+ List<ACL> acl) throws
+ IOException {
+ // split path into elements
+
+ zkMkPath(RegistryPathUtils.parentOf(path),
+ CreateMode.PERSISTENT, true, acl);
+ }
+
+ /**
+ * Create a path with given data. byte[0] is used for a path
+ * without data
+ * @param path path of operation
+ * @param mode creation mode
+ * @param data initial data
+ * @param acls ACLs for the path
+ * @throws IOException on any failure
+ */
+ public void zkCreate(String path,
+ CreateMode mode,
+ byte[] data,
+ List<ACL> acls) throws IOException {
+ Preconditions.checkArgument(data != null, "null data");
+ checkServiceLive();
+ String fullpath = createFullPath(path);
+ try {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Creating {} with {} bytes of data and ACL {}",
+ fullpath, data.length,
+ new RegistrySecurity.AclListInfo(acls));
+ }
+ curator.create().withMode(mode).withACL(acls).forPath(fullpath, data);
+ } catch (Exception e) {
+ throw operationFailure(fullpath, "create()", e, acls);
+ }
+ }
+
+ /**
+ * Update the data for a path
+ * @param path path of operation
+ * @param data new data
+ * @throws IOException
+ */
+ public void zkUpdate(String path, byte[] data) throws IOException {
+ Preconditions.checkArgument(data != null, "null data");
+ checkServiceLive();
+ path = createFullPath(path);
+ try {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Updating {} with {} bytes", path, data.length);
+ }
+ curator.setData().forPath(path, data);
+ } catch (Exception e) {
+ throw operationFailure(path, "update()", e);
+ }
+ }
+
+ /**
+ * Create or update an entry
+ * @param path path
+ * @param mode creation mode for new entries
+ * @param data data
+ * @param acl ACL for path - used when creating a new entry
+ * @param overwrite enable overwrite
+ * @return true if the entry was created, false if it was simply updated.
+ * @throws IOException on any failure
+ */
+ public boolean zkSet(String path,
+ CreateMode mode,
+ byte[] data,
+ List<ACL> acl, boolean overwrite) throws IOException {
+ Preconditions.checkArgument(data != null, "null data");
+ checkServiceLive();
+ if (!zkPathExists(path)) {
+ zkCreate(path, mode, data, acl);
+ return true;
+ } else {
+ if (overwrite) {
+ zkUpdate(path, data);
+ return false;
+ } else {
+ throw new FileAlreadyExistsException(path);
+ }
+ }
+ }
+
+ /**
+ * Delete a directory/directory tree.
+ * It is not an error to delete a path that does not exist
+ * @param path path of operation
+ * @param recursive flag to trigger recursive deletion
+ * @param backgroundCallback callback; setting this converts the operation
+ * into an async/background task
+ * @throws IOException on problems other than no-such-path
+ */
+ public void zkDelete(String path,
+ boolean recursive,
+ BackgroundCallback backgroundCallback) throws IOException {
+ checkServiceLive();
+ String fullpath = createFullPath(path);
+ try {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Deleting {}", fullpath);
+ }
+ DeleteBuilder delete = curator.delete();
+ if (recursive) {
+ delete.deletingChildrenIfNeeded();
+ }
+ if (backgroundCallback != null) {
+ delete.inBackground(backgroundCallback);
+ }
+ delete.forPath(fullpath);
+ } catch (KeeperException.NoNodeException e) {
+ // not an error
+ } catch (Exception e) {
+ throw operationFailure(fullpath, "delete()", e);
+ }
+ }
+
+ /**
+ * List all children of a path
+ * @param path path of operation
+ * @return a possibly empty list of children
+ * @throws IOException
+ */
+ public List<String> zkList(String path) throws IOException {
+ checkServiceLive();
+ String fullpath = createFullPath(path);
+ try {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("ls {}", fullpath);
+ }
+ GetChildrenBuilder builder = curator.getChildren();
+ List<String> children = builder.forPath(fullpath);
+ return children;
+ } catch (Exception e) {
+ throw operationFailure(fullpath, "ls()", e);
+ }
+ }
+
+ /**
+ * Read data on a path
+ * @param path path of operation
+ * @return the data
+ * @throws IOException read failure
+ */
+ public byte[] zkRead(String path) throws IOException {
+ checkServiceLive();
+ String fullpath = createFullPath(path);
+ try {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Reading {}", fullpath);
+ }
+ return curator.getData().forPath(fullpath);
+ } catch (Exception e) {
+ throw operationFailure(fullpath, "read()", e);
+ }
+ }
+
+ /**
+ * Return a path dumper instance which can do a full dump
+ * of the registry tree in its toString()
+ * operation
+ * @param verbose verbose flag - includes more details (such as ACLs)
+ * @return a class to dump the registry
+ */
+ public ZKPathDumper dumpPath(boolean verbose) {
+ return new ZKPathDumper(curator, registryRoot, verbose);
+ }
+
+ /**
+ * Add a new write access entry for all future write operations.
+ * @param id ID to use
+ * @param pass password
+ * @return true if the entry was added; false if the registry is insecure
+ * @throws IOException on any failure to build the digest
+ */
+ public boolean addWriteAccessor(String id, String pass) throws IOException {
+ RegistrySecurity security = getRegistrySecurity();
+ ACL digestACL = new ACL(ZooDefs.Perms.ALL,
+ security.toDigestId(security.digest(id, pass)));
+ return security.addDigestACL(digestACL);
+ }
+
+ /**
+ * Clear all write accessors
+ */
+ public void clearWriteAccessors() {
+ getRegistrySecurity().resetDigestACLs();
+ }
+
+ /**
+ * Diagnostics method to dump a registry robustly.
+ * Any exception raised is swallowed
+ * @param verbose verbose path dump
+ * @return the registry tree
+ */
+ protected String dumpRegistryRobustly(boolean verbose) {
+ try {
+ ZKPathDumper pathDumper = dumpPath(verbose);
+ return pathDumper.toString();
+ } catch (Exception e) {
+ // ignore
+ LOG.debug("Ignoring exception: {}", e);
+ }
+ return "";
+ }
+}
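
The wiring performed by createCurator() above can be illustrated standalone. The following is a minimal sketch, not part of this patch: it uses the same Curator 2.x APIs, hard-coding the yarn-default.xml defaults in place of the hadoop.registry.zk.* configuration reads, and assumes an insecure ZooKeeper at localhost:2181.

```java
import org.apache.curator.ensemble.fixed.FixedEnsembleProvider;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.BoundedExponentialBackoffRetry;

public class CuratorWiringSketch {
  public static void main(String[] args) throws Exception {
    CuratorFramework curator = CuratorFrameworkFactory.builder()
        // fixed quorum, as returned by supplyBindingInformation()
        .ensembleProvider(new FixedEnsembleProvider("localhost:2181"))
        .sessionTimeoutMs(60000)      // hadoop.registry.zk.session.timeout.ms
        .connectionTimeoutMs(15000)   // hadoop.registry.zk.connection.timeout.ms
        // bounded exponential backoff: base sleep, sleep ceiling, max retries
        .retryPolicy(new BoundedExponentialBackoffRetry(1000, 60000, 5))
        .build();
    curator.start();
    try {
      // the equivalent of zkStat("/"): a null Stat means the path is absent
      System.out.println(curator.checkExists().forPath("/registry"));
    } finally {
      curator.close();
    }
  }
}
```
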
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryBindingSource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryBindingSource.java
new file mode 100644
index 0000000000..bab4742ad6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryBindingSource.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.impl.zk;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Interface which can be implemented by a registry binding source
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface RegistryBindingSource {
+
+ /**
+ * Supply the binding information for this registry
+ * @return the binding information data
+ */
+ BindingInformation supplyBindingInformation();
+}
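
A hypothetical implementation, shown only to illustrate the contract: CuratorService#createEnsembleProvider() invokes supplyBindingInformation() on whatever source it is given, so a custom source can redirect the registry to a quorum chosen at construction time. StaticBindingSource is not part of this patch.

```java
package org.apache.hadoop.registry.client.impl.zk;

import org.apache.curator.ensemble.fixed.FixedEnsembleProvider;

/** Hypothetical binding source wrapping a fixed quorum string. */
public class StaticBindingSource implements RegistryBindingSource {

  private final String quorum;

  public StaticBindingSource(String quorum) {
    this.quorum = quorum;
  }

  @Override
  public BindingInformation supplyBindingInformation() {
    BindingInformation binding = new BindingInformation();
    // BindingInformation exposes the ensemble provider and a description
    binding.ensembleProvider = new FixedEnsembleProvider(quorum);
    binding.description = "static quorum \"" + quorum + "\"";
    return binding;
  }
}
```

It would be passed to the service constructor in place of the default self-binding: new CuratorService("curator", new StaticBindingSource("zk1:2181,zk2:2181")).
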
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryInternalConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryInternalConstants.java
new file mode 100644
index 0000000000..f04673a08b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryInternalConstants.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.impl.zk;
+
+import org.apache.zookeeper.ZooDefs;
+
+/**
+ * Internal constants for the registry.
+ *
+ * These are the things which aren't visible to users.
+ *
+ */
+public interface RegistryInternalConstants {
+
+ /**
+ * Pattern of a single entry in the registry path: {@value}.
+ *
+ * This is what constitutes a valid hostname according to current RFCs:
+ * the first and last characters must be lowercase alphanumeric;
+ * alphanumerics and hyphens are allowed in between.
+ *
+ * No upper limit is placed on the size of an entry.
+ */
+ String VALID_PATH_ENTRY_PATTERN =
+ "([a-z0-9]|[a-z0-9][a-z0-9\\-]*[a-z0-9])";
+
+ /**
+ * Permissions for readers: {@value}.
+ */
+ int PERMISSIONS_REGISTRY_READERS = ZooDefs.Perms.READ;
+
+ /**
+ * Permissions for system services: {@value}
+ */
+ int PERMISSIONS_REGISTRY_SYSTEM_SERVICES = ZooDefs.Perms.ALL;
+
+ /**
+ * Permissions for a user's root entry: {@value}.
+ * All except the admin permissions (ACL access) on a node
+ */
+ int PERMISSIONS_REGISTRY_USER_ROOT =
+ ZooDefs.Perms.READ | ZooDefs.Perms.WRITE | ZooDefs.Perms.CREATE |
+ ZooDefs.Perms.DELETE;
+
+ /**
+ * Name of the SASL auth provider which has to be added to the ZK server
+ * to enable "sasl:" auth patterns: {@value}.
+ *
+ * Without this, callers can connect via SASL, but
+ * they can't use it in ACLs
+ */
+ String SASLAUTHENTICATION_PROVIDER =
+ "org.apache.zookeeper.server.auth.SASLAuthenticationProvider";
+
+ /**
+ * String to use as the prefix when declaring a new auth provider: {@value}.
+ */
+ String ZOOKEEPER_AUTH_PROVIDER = "zookeeper.authProvider";
+
+ /**
+ * This is the Hadoop environment variable which propagates the identity
+ * of a user in an insecure cluster
+ */
+ String HADOOP_USER_NAME = "HADOOP_USER_NAME";
+}
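
The hostname pattern is easiest to see in action. A small sketch (not part of this patch) applying VALID_PATH_ENTRY_PATTERN to a few candidate entries:

```java
import java.util.regex.Pattern;
import org.apache.hadoop.registry.client.impl.zk.RegistryInternalConstants;

public class PathEntryCheck {
  public static void main(String[] args) {
    Pattern pattern =
        Pattern.compile(RegistryInternalConstants.VALID_PATH_ENTRY_PATTERN);
    // "web-1" and "a" match; leading/trailing hyphens and uppercase do not
    for (String entry : new String[]{"web-1", "a", "-bad", "bad-", "UPPER"}) {
      System.out.println(entry + " => " + pattern.matcher(entry).matches());
    }
  }
}
```
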
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryOperationsService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryOperationsService.java
new file mode 100644
index 0000000000..c54c205ce9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryOperationsService.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.impl.zk;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.registry.client.api.BindFlags;
+import org.apache.hadoop.registry.client.api.RegistryOperations;
+
+import org.apache.hadoop.registry.client.binding.RegistryUtils;
+import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
+import org.apache.hadoop.registry.client.exceptions.InvalidPathnameException;
+import org.apache.hadoop.registry.client.types.RegistryPathStatus;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.data.ACL;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * The Registry operations service.
+ *
+ * This service implements the {@link RegistryOperations}
+ * API by mapping the commands to zookeeper operations, and translating
+ * results and exceptions back into those specified by the API.
+ *
+ * Factory methods should hide the detail that this has been implemented via
+ * the {@link CuratorService} by returning it cast to that
+ * {@link RegistryOperations} interface, rather than this implementation class.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class RegistryOperationsService extends CuratorService
+ implements RegistryOperations {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(RegistryOperationsService.class);
+
+ private final RegistryUtils.ServiceRecordMarshal serviceRecordMarshal
+ = new RegistryUtils.ServiceRecordMarshal();
+
+ public RegistryOperationsService(String name) {
+ this(name, null);
+ }
+
+ public RegistryOperationsService() {
+ this("RegistryOperationsService");
+ }
+
+ public RegistryOperationsService(String name,
+ RegistryBindingSource bindingSource) {
+ super(name, bindingSource);
+ }
+
+ /**
+ * Get the aggregate set of ACLs the client should use
+ * to create directories
+ * @return the ACL list
+ */
+ public List<ACL> getClientAcls() {
+ return getRegistrySecurity().getClientACLs();
+ }
+
+ /**
+ * Validate a path; this includes checking that its elements are DNS-valid
+ * @param path path to validate
+ * @throws InvalidPathnameException if a path is considered invalid
+ */
+ protected void validatePath(String path) throws InvalidPathnameException {
+ RegistryPathUtils.validateElementsAsDNS(path);
+ }
+
+ @Override
+ public boolean mknode(String path, boolean createParents) throws IOException {
+ validatePath(path);
+ return zkMkPath(path, CreateMode.PERSISTENT, createParents, getClientAcls());
+ }
+
+ @Override
+ public void bind(String path,
+ ServiceRecord record,
+ int flags) throws IOException {
+ Preconditions.checkArgument(record != null, "null record");
+ validatePath(path);
+ LOG.info("Bound at {} : {}", path, record);
+
+ CreateMode mode = CreateMode.PERSISTENT;
+ byte[] bytes = serviceRecordMarshal.toByteswithHeader(record);
+ zkSet(path, mode, bytes, getClientAcls(),
+ ((flags & BindFlags.OVERWRITE) != 0));
+ }
+
+ @Override
+ public ServiceRecord resolve(String path) throws IOException {
+ byte[] bytes = zkRead(path);
+ return serviceRecordMarshal.fromBytesWithHeader(path, bytes);
+ }
+
+ @Override
+ public boolean exists(String path) throws IOException {
+ validatePath(path);
+ return zkPathExists(path);
+ }
+
+ @Override
+ public RegistryPathStatus stat(String path) throws IOException {
+ validatePath(path);
+ Stat stat = zkStat(path);
+
+ String name = RegistryPathUtils.lastPathEntry(path);
+ RegistryPathStatus status = new RegistryPathStatus(
+ name,
+ stat.getCtime(),
+ stat.getDataLength(),
+ stat.getNumChildren());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Stat {} => {}", path, status);
+ }
+ return status;
+ }
+
+ @Override
+ public List<String> list(String path) throws IOException {
+ validatePath(path);
+ return zkList(path);
+ }
+
+ @Override
+ public void delete(String path, boolean recursive) throws IOException {
+ validatePath(path);
+ zkDelete(path, recursive, null);
+ }
+
+}
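
Seen from a client, the operations above compose into a simple lifecycle: init and start the service, then issue path operations relative to the registry root. A minimal sketch (not part of this patch), assuming an insecure registry and a ZooKeeper quorum at the default hadoop.registry.zk.quorum of localhost:2181; the paths are purely illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.registry.client.impl.zk.RegistryOperationsService;

public class RegistryClientSketch {
  public static void main(String[] args) throws Exception {
    RegistryOperationsService registry =
        new RegistryOperationsService("sketch");
    registry.init(new Configuration());
    registry.start();
    try {
      // create a persistent node (and any missing parents) under /registry
      registry.mknode("/users/alice/services/demo", true);
      System.out.println("exists: "
          + registry.exists("/users/alice/services/demo"));
      System.out.println("children: "
          + registry.list("/users/alice/services"));
      registry.delete("/users/alice/services/demo", true);
    } finally {
      registry.stop();
    }
  }
}
```
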
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
new file mode 100644
index 0000000000..6484d287f3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
@@ -0,0 +1,996 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.impl.zk;
+
+import com.google.common.base.Preconditions;
+import com.google.common.base.Splitter;
+import com.google.common.collect.Lists;
+import org.apache.commons.lang.StringUtils;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.util.KerberosUtil;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.service.ServiceStateException;
+import org.apache.hadoop.util.ZKUtil;
+import org.apache.zookeeper.Environment;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.client.ZooKeeperSaslClient;
+import org.apache.zookeeper.data.ACL;
+import org.apache.zookeeper.data.Id;
+import org.apache.zookeeper.server.auth.DigestAuthenticationProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.security.auth.login.AppConfigurationEntry;
+import java.io.File;
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.security.NoSuchAlgorithmException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Locale;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+import static org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions.*;
+import static org.apache.hadoop.registry.client.api.RegistryConstants.*;
+
+/**
+ * Implement the registry security ... a self-contained service for
+ * testability.
+ *
+ * This class contains:
+ * <ol>
+ *   <li>The registry security policy implementation, configuration
+ *   reading, ACL setup and management.</li>
+ *   <li>Lots of static helper methods to aid security setup and
+ *   debugging.</li>
+ * </ol>
+ */
+public class RegistrySecurity extends AbstractService {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(RegistrySecurity.class);
+
+ public static final String E_UNKNOWN_AUTHENTICATION_MECHANISM =
+ "Unknown/unsupported authentication mechanism; ";
+
+ /**
+ * there's no default user to add with permissions, so it would be
+ * impossible to create nodes with unrestricted user access
+ */
+ public static final String E_NO_USER_DETERMINED_FOR_ACLS =
+ "No user for ACLs determinable from current user or registry option "
+ + KEY_REGISTRY_USER_ACCOUNTS;
+
+ /**
+ * Error raised when the registry is tagged as secure but this
+ * process doesn't have hadoop security enabled.
+ */
+ public static final String E_NO_KERBEROS =
+ "Registry security is enabled -but Hadoop security is not enabled";
+
+ /**
+ * Access policy options
+ */
+ private enum AccessPolicy {
+ anon, sasl, digest
+ }
+
+ /**
+ * Access mechanism
+ */
+ private AccessPolicy access;
+
+ /**
+ * User used for digest auth
+ */
+ private String digestAuthUser;
+
+ /**
+ * Password used for digest auth
+ */
+ private String digestAuthPassword;
+
+ /**
+ * Auth data used for digest auth
+ */
+ private byte[] digestAuthData;
+
+ /**
+ * flag set to true if the registry has security enabled.
+ */
+ private boolean secureRegistry;
+
+ /**
+ * An ACL with read-write access for anyone
+ */
+ public static final ACL ALL_READWRITE_ACCESS =
+ new ACL(ZooDefs.Perms.ALL, ZooDefs.Ids.ANYONE_ID_UNSAFE);
+
+ /**
+ * An ACL with read access for anyone
+ */
+ public static final ACL ALL_READ_ACCESS =
+ new ACL(ZooDefs.Perms.READ, ZooDefs.Ids.ANYONE_ID_UNSAFE);
+
+ /**
+ * An ACL list containing the {@link #ALL_READWRITE_ACCESS} entry.
+ * It is copy on write so can be shared without worry
+ */
+ public static final List<ACL> WorldReadWriteACL;
+
+ static {
+ List<ACL> acls = new ArrayList<ACL>();
+ acls.add(ALL_READWRITE_ACCESS);
+ WorldReadWriteACL = new CopyOnWriteArrayList<ACL>(acls);
+ }
+
+ /**
+ * the list of system ACLs
+ */
+ private final List<ACL> systemACLs = new ArrayList<ACL>();
+
+ /**
+ * A list of digest ACLs which can be added to permissions
+ * —and cleared later.
+ */
+ private final List<ACL> digestACLs = new ArrayList<ACL>();
+
+ /**
+ * the default kerberos realm
+ */
+ private String kerberosRealm;
+
+ /**
+ * Client context
+ */
+ private String jaasClientContext;
+
+ /**
+ * Client identity
+ */
+ private String jaasClientIdentity;
+
+ /**
+ * Create an instance
+ * @param name service name
+ */
+ public RegistrySecurity(String name) {
+ super(name);
+ }
+
+ /**
+ * Init the service: this sets up security based on the configuration
+ * @param conf configuration
+ * @throws Exception
+ */
+ @Override
+ protected void serviceInit(Configuration conf) throws Exception {
+ super.serviceInit(conf);
+ String auth = conf.getTrimmed(KEY_REGISTRY_CLIENT_AUTH,
+ REGISTRY_CLIENT_AUTH_ANONYMOUS);
+
+ // TODO JDK7 SWITCH
+ if (REGISTRY_CLIENT_AUTH_KERBEROS.equals(auth)) {
+ access = AccessPolicy.sasl;
+ } else if (REGISTRY_CLIENT_AUTH_DIGEST.equals(auth)) {
+ access = AccessPolicy.digest;
+ } else if (REGISTRY_CLIENT_AUTH_ANONYMOUS.equals(auth)) {
+ access = AccessPolicy.anon;
+ } else {
+ throw new ServiceStateException(E_UNKNOWN_AUTHENTICATION_MECHANISM
+ + "\"" + auth + "\"");
+ }
+ initSecurity();
+ }
+
+ /**
+ * Init security.
+ *
+ * After this operation, the {@link #systemACLs} list is valid.
+ * @throws IOException
+ */
+ private void initSecurity() throws IOException {
+
+ secureRegistry =
+ getConfig().getBoolean(KEY_REGISTRY_SECURE, DEFAULT_REGISTRY_SECURE);
+ systemACLs.clear();
+ if (secureRegistry) {
+ addSystemACL(ALL_READ_ACCESS);
+
+ // determine the kerberos realm from JVM and settings
+ kerberosRealm = getConfig().get(KEY_REGISTRY_KERBEROS_REALM,
+ getDefaultRealmInJVM());
+
+ // System Accounts
+ String system = getOrFail(KEY_REGISTRY_SYSTEM_ACCOUNTS,
+ DEFAULT_REGISTRY_SYSTEM_ACCOUNTS);
+
+ systemACLs.addAll(buildACLs(system, kerberosRealm, ZooDefs.Perms.ALL));
+
+ // user accounts (may be empty, but for digest at least one user
+ // ACL must be built up)
+ String user = getConfig().get(KEY_REGISTRY_USER_ACCOUNTS,
+ DEFAULT_REGISTRY_USER_ACCOUNTS);
+ List<ACL> userACLs = buildACLs(user, kerberosRealm, ZooDefs.Perms.ALL);
+
+ // add self if the current user can be determined
+ ACL self;
+ if (UserGroupInformation.isSecurityEnabled()) {
+ self = createSaslACLFromCurrentUser(ZooDefs.Perms.ALL);
+ if (self != null) {
+ userACLs.add(self);
+ }
+ }
+
+ // here check for UGI having secure on or digest + ID
+ switch (access) {
+ case sasl:
+ // secure + SASL => has to be authenticated
+ if (!UserGroupInformation.isSecurityEnabled()) {
+ throw new IOException("Kerberos required for secure registry access");
+ }
+ UserGroupInformation currentUser =
+ UserGroupInformation.getCurrentUser();
+ jaasClientContext = getOrFail(KEY_REGISTRY_CLIENT_JAAS_CONTEXT,
+ DEFAULT_REGISTRY_CLIENT_JAAS_CONTEXT);
+ jaasClientIdentity = currentUser.getShortUserName();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Auth is SASL user=\"{}\" JAAS context=\"{}\"",
+ jaasClientIdentity,
+ jaasClientContext);
+ }
+ break;
+
+ case digest:
+ String id = getOrFail(KEY_REGISTRY_CLIENT_AUTHENTICATION_ID, "");
+ String pass = getOrFail(KEY_REGISTRY_CLIENT_AUTHENTICATION_PASSWORD, "");
+ if (userACLs.isEmpty()) {
+ throw new ServiceStateException(E_NO_USER_DETERMINED_FOR_ACLS);
+ }
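+ // validate the id:password pair; digest() throws IOException on bad input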
+ digest(id, pass);
+ ACL acl = new ACL(ZooDefs.Perms.ALL, toDigestId(id, pass));
+ userACLs.add(acl);
+ digestAuthUser = id;
+ digestAuthPassword = pass;
+ String authPair = id + ":" + pass;
+ digestAuthData = authPair.getBytes("UTF-8");
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Auth is Digest ACL: {}", aclToString(acl));
+ }
+ break;
+
+ case anon:
+ // nothing is needed; account is read only.
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Auth is anonymous");
+ }
+ userACLs = new ArrayList<ACL>(0);
+ break;
+ }
+ systemACLs.addAll(userACLs);
+
+ } else {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Registry has no security");
+ }
+ // wide open cluster, adding system acls
+ systemACLs.addAll(WorldReadWriteACL);
+ }
+ }
+
+ /**
+ * Add another system ACL
+ * @param acl add ACL
+ */
+ public void addSystemACL(ACL acl) {
+ systemACLs.add(acl);
+ }
+
+ /**
+ * Add a digest ACL
+ * @param acl add ACL
+ */
+ public boolean addDigestACL(ACL acl) {
+ if (secureRegistry) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Added ACL {}", aclToString(acl));
+ }
+ digestACLs.add(acl);
+ return true;
+ } else {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Ignoring added ACL - registry is insecure{}",
+ aclToString(acl));
+ }
+ return false;
+ }
+ }
+
+ /**
+ * Reset the digest ACL list
+ */
+ public void resetDigestACLs() {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Cleared digest ACLs");
+ }
+ digestACLs.clear();
+ }
+
+ /**
+ * Flag to indicate the cluster is secure
+ * @return true if the config enabled security
+ */
+ public boolean isSecureRegistry() {
+ return secureRegistry;
+ }
+
+ /**
+ * Get the system principals
+ * @return the system principals
+ */
+ public List<ACL> getSystemACLs() {
+ Preconditions.checkNotNull(systemACLs, "registry security is uninitialized");
+ return Collections.unmodifiableList(systemACLs);
+ }
+
+ /**
+ * Get all ACLs needed for a client to use when writing to the repo.
+ * That is: system ACLs, its own ACL, any digest ACLs
+ * @return the client ACLs
+ */
+ public List<ACL> getClientACLs() {
+ List<ACL> clientACLs = new ArrayList<ACL>(systemACLs);
+ clientACLs.addAll(digestACLs);
+ return clientACLs;
+ }
+
+ /**
+ * Create a SASL ACL for the user
+ * @param perms permissions
+ * @return an ACL for the current user or null if they aren't a kerberos user
+ * @throws IOException
+ */
+ public ACL createSaslACLFromCurrentUser(int perms) throws IOException {
+ UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
+ if (currentUser.hasKerberosCredentials()) {
+ return createSaslACL(currentUser, perms);
+ } else {
+ return null;
+ }
+ }
+
+ /**
+ * Given a UGI, create a SASL ACL from it
+ * @param ugi UGI
+ * @param perms permissions
+ * @return a new ACL
+ */
+ public ACL createSaslACL(UserGroupInformation ugi, int perms) {
+ String userName = ugi.getUserName();
+ return new ACL(perms, new Id(SCHEME_SASL, userName));
+ }
+
+ /**
+ * Get a conf option, throw an exception if it is null/empty
+ * @param key key
+ * @param defval default value
+ * @return the value
+ * @throws IOException if missing
+ */
+ private String getOrFail(String key, String defval) throws IOException {
+ String val = getConfig().get(key, defval);
+ if (StringUtils.isEmpty(val)) {
+ throw new IOException("Missing value for configuration option " + key);
+ }
+ return val;
+ }
+
+ /**
+ * Check for an id:password tuple being valid.
+ * This test is stricter than that in {@link DigestAuthenticationProvider},
+ * which splits the string, but doesn't check the contents of each
+ * half for being non-"".
+ * @param idPasswordPair id:pass pair
+ * @return true if the pass is considered valid.
+ */
+ public boolean isValid(String idPasswordPair) {
+ String[] parts = idPasswordPair.split(":");
+ return parts.length == 2
+ && !StringUtils.isEmpty(parts[0])
+ && !StringUtils.isEmpty(parts[1]);
+ }
+
+ /**
+ * Get the derived kerberos realm.
+ * @return the realm, built from the JVM realm or the configuration
+ * value overriding it. An empty string means "unknown".
+ */
+ public String getKerberosRealm() {
+ return kerberosRealm;
+ }
+
+ /**
+ * Generate a base-64 encoded digest of the id:password pair
+ * @param idPasswordPair id:password
+ * @return a string that can be used for authentication
+ */
+ public String digest(String idPasswordPair) throws IOException {
+ if (StringUtils.isEmpty(idPasswordPair) || !isValid(idPasswordPair)) {
+ throw new IOException("Invalid id:password: " + idPasswordPair);
+ }
+ try {
+ return DigestAuthenticationProvider.generateDigest(idPasswordPair);
+ } catch (NoSuchAlgorithmException e) {
+ // unlikely since it is standard to the JVM, but maybe JCE restrictions
+ // could trigger it
+ throw new IOException(e.toString(), e);
+ }
+ }
+
+ /**
+ * Generate a base-64 encoded digest of the id:password pair
+ * @param id ID
+ * @param password pass
+ * @return a string that can be used for authentication
+ * @throws IOException
+ */
+ public String digest(String id, String password) throws IOException {
+ return digest(id + ":" + password);
+ }
+
+ /**
+ * Given a digest, create an ID from it
+ * @param digest digest
+ * @return ID
+ */
+ public Id toDigestId(String digest) {
+ return new Id(SCHEME_DIGEST, digest);
+ }
+
+ /**
+ * Create a Digest ID from an id:pass pair
+ * @param id ID
+ * @param password password
+ * @return an ID
+ * @throws IOException
+ */
+ public Id toDigestId(String id, String password) throws IOException {
+ return toDigestId(digest(id, password));
+ }
+
+ /**
+ * Split up a list of the form
+ * sasl:mapred@,digest:5f55d66, sasl@yarn@EXAMPLE.COM
+ * into a list of possible ACL values, trimming as needed
+ *
+ * The supplied realm is added to entries where
+ * <ol>
+ *   <li>the string begins "sasl:"</li>
+ *   <li>the string ends with "@"</li>
+ * </ol>
+ * No attempt is made to validate any of the acl patterns.
+ *
+ * @param aclString list of 0 or more ACLs
+ * @param realm realm to add
+ * @return a list of split and potentially patched ACL pairs.
+ *
+ */
+ public List<String> splitAclPairs(String aclString, String realm) {
+ List<String> list = Lists.newArrayList(
+ Splitter.on(',').omitEmptyStrings().trimResults()
+ .split(aclString));
+ ListIterator<String> listIterator = list.listIterator();
+ while (listIterator.hasNext()) {
+ String next = listIterator.next();
+ if (next.startsWith(SCHEME_SASL +":") && next.endsWith("@")) {
+ listIterator.set(next + realm);
+ }
+ }
+ return list;
+ }
+
+ /**
+ * Parse a string down to an ID, adding a realm if needed
+ * @param idPair id:data tuple
+ * @param realm realm to add
+ * @return the ID.
+ * @throws IllegalArgumentException if the idPair is invalid
+ */
+ public Id parse(String idPair, String realm) {
+ int firstColon = idPair.indexOf(':');
+ int lastColon = idPair.lastIndexOf(':');
+ if (firstColon == -1 || lastColon == -1 || firstColon != lastColon) {
+ throw new IllegalArgumentException(
+ "ACL '" + idPair + "' not of expected form scheme:id");
+ }
+ String scheme = idPair.substring(0, firstColon);
+ String id = idPair.substring(firstColon + 1);
+ if (id.endsWith("@")) {
+ Preconditions.checkArgument(
+ StringUtils.isNotEmpty(realm),
+ "@ suffixed account but no realm %s", id);
+ id = id + realm;
+ }
+ return new Id(scheme, id);
+ }
+
+ /**
+ * Parse the IDs, adding a realm if needed, setting the permissions
+ * @param principalList id string
+ * @param realm realm to add
+ * @param perms permissions
+ * @return the relevant ACLs
+ * @throws IOException
+ */
+ public List<ACL> buildACLs(String principalList, String realm, int perms)
+ throws IOException {
+ List<String> aclPairs = splitAclPairs(principalList, realm);
+ List<ACL> ids = new ArrayList<ACL>(aclPairs.size());
+ for (String aclPair : aclPairs) {
+ ACL newAcl = new ACL();
+ newAcl.setId(parse(aclPair, realm));
+ newAcl.setPerms(perms);
+ ids.add(newAcl);
+ }
+ return ids;
+ }
+
+ /**
+ * Parse an ACL list. This includes configuration indirection
+ * {@link ZKUtil#resolveConfIndirection(String)}
+ * @param zkAclConf configuration string
+ * @return an ACL list
+ * @throws IOException on a bad ACL parse
+ */
+ public List<ACL> parseACLs(String zkAclConf) throws IOException {
+ try {
+ return ZKUtil.parseACLs(ZKUtil.resolveConfIndirection(zkAclConf));
+ } catch (ZKUtil.BadAclFormatException e) {
+ throw new IOException("Parsing " + zkAclConf + " :" + e, e);
+ }
+ }
+
+ /**
+ * Get the appropriate Kerberos Auth module for JAAS entries
+ * for this JVM.
+ * @return a JVM-specific kerberos login module classname.
+ */
+ public static String getKerberosAuthModuleForJVM() {
+ if (System.getProperty("java.vendor").contains("IBM")) {
+ return "com.ibm.security.auth.module.Krb5LoginModule";
+ } else {
+ return "com.sun.security.auth.module.Krb5LoginModule";
+ }
+ }
+
+ /**
+ * JAAS template: {@value}
+ * Note the semicolon on the last entry
+ */
+ private static final String JAAS_ENTRY =
+ "%s { \n"
+ + " %s required\n"
+ // kerberos module
+ + " keyTab=\"%s\"\n"
+ + " principal=\"%s\"\n"
+ + " useKeyTab=true\n"
+ + " useTicketCache=false\n"
+ + " doNotPrompt=true\n"
+ + " storeKey=true;\n"
+ + "}; \n"
+ ;
+
+ /**
+ * Create a JAAS entry for insertion
+ * @param context context of the entry
+ * @param principal kerberos principal
+ * @param keytab keytab
+ * @return a formatted JAAS entry
+ */
+ public String createJAASEntry(
+ String context,
+ String principal,
+ File keytab) {
+ Preconditions.checkArgument(StringUtils.isNotEmpty(principal),
+ "invalid principal");
+ Preconditions.checkArgument(StringUtils.isNotEmpty(context),
+ "invalid context");
+ Preconditions.checkArgument(keytab != null && keytab.isFile(),
+ "Keytab null or missing: ");
+ return String.format(
+ Locale.ENGLISH,
+ JAAS_ENTRY,
+ context,
+ getKerberosAuthModuleForJVM(),
+ keytab.getAbsolutePath(),
+ principal);
+ }
+
+ /**
+ * Bind the JVM JAAS setting to the specified JAAS file.
+ *
+ * Important: once a file has been loaded the JVM doesn't pick up
+ * changes
+ * @param jaasFile the JAAS file
+ */
+ public static void bindJVMtoJAASFile(File jaasFile) {
+ String path = jaasFile.getAbsolutePath();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Binding {} to {}", Environment.JAAS_CONF_KEY, path);
+ }
+ System.setProperty(Environment.JAAS_CONF_KEY, path);
+ }
+
+ /**
+ * Set the Zookeeper server property
+ * {@link ZookeeperConfigOptions#PROP_ZK_SERVER_SASL_CONTEXT}
+ * to the SASL context. When the ZK server starts, this is the context
+ * which it will read in
+ * @param contextName the name of the context
+ */
+ public static void bindZKToServerJAASContext(String contextName) {
+ System.setProperty(PROP_ZK_SERVER_SASL_CONTEXT, contextName);
+ }
+
+ /**
+ * Reset any system properties related to JAAS
+ */
+ public static void clearJaasSystemProperties() {
+ System.clearProperty(Environment.JAAS_CONF_KEY);
+ }
+
+ /**
+ * Resolve the context of an entry. This is an effective test of
+ * JAAS setup, because it will relay detected problems up
+ * @param context context name
+ * @return the entry
+ * @throws RuntimeException if there is no context entry found
+ */
+ public static AppConfigurationEntry[] validateContext(String context) {
+ if (context == null) {
+ throw new RuntimeException("Null context argument");
+ }
+ if (context.isEmpty()) {
+ throw new RuntimeException("Empty context argument");
+ }
+ javax.security.auth.login.Configuration configuration =
+ javax.security.auth.login.Configuration.getConfiguration();
+ AppConfigurationEntry[] entries =
+ configuration.getAppConfigurationEntry(context);
+ if (entries == null) {
+ throw new RuntimeException(
+ String.format("Entry \"%s\" not found; " +
+ "JAAS config = %s",
+ context,
+ describeProperty(Environment.JAAS_CONF_KEY) ));
+ }
+ return entries;
+ }
+
+ /**
+ * Apply the security environment to this curator instance. This
+ * may include setting up the ZK system properties for SASL
+ * @param builder curator builder
+ */
+ public void applySecurityEnvironment(CuratorFrameworkFactory.Builder builder) {
+
+ if (isSecureRegistry()) {
+ switch (access) {
+ case anon:
+ clearZKSaslClientProperties();
+ break;
+
+ case digest:
+ // no SASL
+ clearZKSaslClientProperties();
+ builder.authorization(SCHEME_DIGEST, digestAuthData);
+ break;
+
+ case sasl:
+ // bind to the current identity and context within the JAAS file
+ setZKSaslClientProperties(jaasClientIdentity, jaasClientContext);
+ }
+ }
+ }
+
+ /**
+ * Set the client properties. This forces the ZK client into
+ * failing if it can't auth.
+ * Important: This is JVM-wide.
+ * @param username username
+ * @param context login context
+ * @throws RuntimeException if the context cannot be found in the current
+ * JAAS context
+ */
+ public static void setZKSaslClientProperties(String username,
+ String context) {
+ RegistrySecurity.validateContext(context);
+ enableZookeeperClientSASL();
+ System.setProperty(PROP_ZK_SASL_CLIENT_USERNAME, username);
+ System.setProperty(PROP_ZK_SASL_CLIENT_CONTEXT, context);
+ }
+
+ /**
+ * Clear all the ZK SASL Client properties
+ * Important: This is JVM-wide
+ */
+ public static void clearZKSaslClientProperties() {
+ disableZookeeperClientSASL();
+ System.clearProperty(PROP_ZK_SASL_CLIENT_CONTEXT);
+ System.clearProperty(PROP_ZK_SASL_CLIENT_USERNAME);
+ }
+
+ /**
+ * Turn ZK SASL on
+ * Important: This is JVM-wide
+ */
+ protected static void enableZookeeperClientSASL() {
+ System.setProperty(PROP_ZK_ENABLE_SASL_CLIENT, "true");
+ }
+
+ /**
+ * Force disable ZK SASL bindings.
+ * Important: This is JVM-wide
+ */
+ public static void disableZookeeperClientSASL() {
+ System.setProperty(ZooKeeperSaslClient.ENABLE_CLIENT_SASL_KEY, "false");
+ }
+
+ /**
+ * Is the system property enabling the SASL client set?
+ * @return true if the SASL client system property is set.
+ */
+ public static boolean isClientSASLEnabled() {
+ return ZooKeeperSaslClient.isEnabled();
+ }
+
+ /**
+ * Log details about the current Hadoop user at INFO.
+ * Robust against IOEs when trying to get the current user
+ */
+ public void logCurrentHadoopUser() {
+ try {
+ UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
+ LOG.info("Current user = {}",currentUser);
+ UserGroupInformation realUser = currentUser.getRealUser();
+ LOG.info("Real User = {}" , realUser);
+ } catch (IOException e) {
+ LOG.warn("Failed to get current user {}, {}", e);
+ }
+ }
+
+ /**
+ * Stringify a list of ACLs for logging. Digest ACLs have their
+ * digest values stripped for security.
+ * @param acls ACL list
+ * @return a string for logs, exceptions, ...
+ */
+ public static String aclsToString(List<ACL> acls) {
+ StringBuilder builder = new StringBuilder();
+ if (acls == null) {
+ builder.append("null ACL");
+ } else {
+ builder.append('\n');
+ for (ACL acl : acls) {
+ builder.append(aclToString(acl))
+ .append(" ");
+ }
+ }
+ return builder.toString();
+ }
+
+ /**
+ * Convert an ACL to a string, with any obfuscation needed
+ * @param acl ACL
+ * @return ACL string value
+ */
+ public static String aclToString(ACL acl) {
+ return String.format(Locale.ENGLISH,
+ "0x%02x: %s",
+ acl.getPerms(),
+ idToString(acl.getId())
+ );
+ }
+
+ /**
+ * Convert an ID to a string, stripping out all but the first few characters
+ * of any digest auth hash for security reasons
+ * @param id ID
+ * @return a string description of a Zookeeper ID
+ */
+ public static String idToString(Id id) {
+ String s;
+ if (id.getScheme().equals(SCHEME_DIGEST)) {
+ String ids = id.getId();
+ int colon = ids.indexOf(':');
+ if (colon > 0) {
+ ids = ids.substring(colon + 3);
+ }
+ s = SCHEME_DIGEST + ": " + ids;
+ } else {
+ s = id.toString();
+ }
+ return s;
+ }
+
+ /**
+ * Build up low-level security diagnostics to aid debugging
+ * @return a string to use in diagnostics
+ */
+ public String buildSecurityDiagnostics() {
+ StringBuilder builder = new StringBuilder();
+ builder.append(secureRegistry ? "secure registry; "
+ : "insecure registry; ");
+ builder.append("Access policy: ").append(access);
+
+ builder.append(", System ACLs: ").append(aclsToString(systemACLs));
+ builder.append(UgiInfo.fromCurrentUser());
+ builder.append(" Kerberos Realm: ").append(kerberosRealm).append(" ; ");
+ builder.append(describeProperty(Environment.JAAS_CONF_KEY));
+ String sasl =
+ System.getProperty(PROP_ZK_ENABLE_SASL_CLIENT,
+ DEFAULT_ZK_ENABLE_SASL_CLIENT);
+ boolean saslEnabled = Boolean.valueOf(sasl);
+ builder.append(describeProperty(PROP_ZK_ENABLE_SASL_CLIENT,
+ DEFAULT_ZK_ENABLE_SASL_CLIENT));
+ if (saslEnabled) {
+ builder.append("JAAS Client Identity")
+ .append("=")
+ .append(jaasClientIdentity)
+ .append("; ");
+ builder.append(KEY_REGISTRY_CLIENT_JAAS_CONTEXT)
+ .append("=")
+ .append(jaasClientContext)
+ .append("; ");
+ builder.append(describeProperty(PROP_ZK_SASL_CLIENT_USERNAME));
+ builder.append(describeProperty(PROP_ZK_SASL_CLIENT_CONTEXT));
+ }
+ builder.append(describeProperty(PROP_ZK_ALLOW_FAILED_SASL_CLIENTS,
+ "(undefined but defaults to true)"));
+ builder.append(describeProperty(
+ PROP_ZK_SERVER_MAINTAIN_CONNECTION_DESPITE_SASL_FAILURE));
+ return builder.toString();
+ }
+
+ private static String describeProperty(String name) {
+ return describeProperty(name, "(undefined)");
+ }
+
+ private static String describeProperty(String name, String def) {
+ return "; " + name + "=" + System.getProperty(name, def);
+ }
+
+ /**
+ * Get the default kerberos realm —returning "" if there
+ * is no realm or other problem
+ * @return the default realm of the system if it
+ * could be determined
+ */
+ public static String getDefaultRealmInJVM() {
+ try {
+ return KerberosUtil.getDefaultRealm();
+ // JDK7
+ } catch (ClassNotFoundException ignored) {
+ // ignored
+ } catch (NoSuchMethodException ignored) {
+ // ignored
+ } catch (IllegalAccessException ignored) {
+ // ignored
+ } catch (InvocationTargetException ignored) {
+ // ignored
+ }
+ return "";
+ }
+
+ /**
+ * Create an ACL for a user.
+ * @param ugi user identity
+ * @param perms permissions
+ * @return the ACL for the specified user. If the username doesn't
+ * contain an "@" symbol, the realm is added
+ */
+ public ACL createACLForUser(UserGroupInformation ugi, int perms) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Creating ACL For ", new UgiInfo(ugi));
+ }
+ if (!secureRegistry) {
+ return ALL_READWRITE_ACCESS;
+ } else {
+ return createACLfromUsername(ugi.getUserName(), perms);
+ }
+ }
+
+ /**
+ * Given a user name (short or long), create a SASL ACL
+ * @param username user name; if it doesn't contain an "@" symbol, the
+ * service's kerberos realm is added
+ * @param perms permissions
+ * @return an ACL for the user
+ */
+ public ACL createACLfromUsername(String username, int perms) {
+ if (!username.contains("@")) {
+ username = username + "@" + kerberosRealm;
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Appending kerberos realm to make {}", username);
+ }
+ }
+ return new ACL(perms, new Id(SCHEME_SASL, username));
+ }
+
+ /**
+ * On demand string-ifier for UGI with extra details
+ */
+ public static class UgiInfo {
+
+ public static UgiInfo fromCurrentUser() {
+ try {
+ return new UgiInfo(UserGroupInformation.getCurrentUser());
+ } catch (IOException e) {
+ LOG.info("Failed to get current user {}", e, e);
+ return new UgiInfo(null);
+ }
+ }
+
+ private final UserGroupInformation ugi;
+
+ public UgiInfo(UserGroupInformation ugi) {
+ this.ugi = ugi;
+ }
+
+ @Override
+ public String toString() {
+ if (ugi == null) {
+ return "(null ugi)";
+ }
+ StringBuilder builder = new StringBuilder();
+ builder.append(ugi.getUserName()).append(": ");
+ builder.append(ugi.toString());
+ builder.append(" hasKerberosCredentials=").append(
+ ugi.hasKerberosCredentials());
+ builder.append(" isFromKeytab=").append(ugi.isFromKeytab());
+ builder.append(" kerberos is enabled in Hadoop =").append(UserGroupInformation.isSecurityEnabled());
+ return builder.toString();
+ }
+
+ }
+
+ /**
+ * on-demand stringifier for a list of ACLs
+ */
+ public static class AclListInfo {
+ public final List<ACL> acls;
+
+ public AclListInfo(List<ACL> acls) {
+ this.acls = acls;
+ }
+
+ @Override
+ public String toString() {
+ return aclsToString(acls);
+ }
+ }
+}
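
The digest mechanism used by addWriteAccessor() and initSecurity() is plain ZooKeeper digest auth: an id:password pair is hashed, and the hash rather than the secret goes into the ACL. A standalone sketch (not part of this patch) of that construction, with an illustrative "bob:secret" pair:

```java
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Id;
import org.apache.zookeeper.server.auth.DigestAuthenticationProvider;

public class DigestAclSketch {
  public static void main(String[] args) throws Exception {
    // hash the id:password pair; only the digest is stored in the ACL
    String digest = DigestAuthenticationProvider.generateDigest("bob:secret");
    ACL acl = new ACL(ZooDefs.Perms.ALL, new Id("digest", digest));
    // a client later authenticates with the raw bytes, e.g. via
    // builder.authorization("digest", "bob:secret".getBytes("UTF-8"))
    System.out.println(acl);
  }
}
```
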
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZKPathDumper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZKPathDumper.java
new file mode 100644
index 0000000000..3c4a730608
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZKPathDumper.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.impl.zk;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.api.GetChildrenBuilder;
+import org.apache.zookeeper.data.ACL;
+import org.apache.zookeeper.data.Stat;
+
+import java.util.List;
+
+/**
+ * This class dumps a registry tree to a string.
+ * It does this in the toString() method, so it
+ * can be used in a log statement: the dump only
+ * takes place if that method is actually evaluated.
+ *
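+ * A sketch of such a use, assuming a {@code curator} instance is in scope:
+ * {@code LOG.debug("Registry: {}", new ZKPathDumper(curator, "/registry", true));}
+ *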
+ */
+@VisibleForTesting
+public class ZKPathDumper {
+
+ public static final int INDENT = 2;
+ private final CuratorFramework curator;
+ private final String root;
+ private final boolean verbose;
+
+ /**
+ * Create a path dumper -but do not dump the path until asked
+ * @param curator curator instance
+ * @param root root
+ * @param verbose verbose flag - includes more details (such as ACLs)
+ */
+ public ZKPathDumper(CuratorFramework curator,
+ String root,
+ boolean verbose) {
+ Preconditions.checkArgument(curator != null);
+ Preconditions.checkArgument(root != null);
+ this.curator = curator;
+ this.root = root;
+ this.verbose = verbose;
+ }
+
+ /**
+ * Trigger the recursive registry dump.
+ * @return a string view of the registry
+ */
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("ZK tree for ").append(root).append('\n');
+ expand(builder, root, 1);
+ return builder.toString();
+ }
+
+ /**
+ * Recursively expand the path into the supplied string builder, increasing
+ * the indentation by {@link #INDENT} as it proceeds (depth first) down
+ * the tree
+ * @param builder string builder to append to
+ * @param path path to examine
+ * @param indent current indentation
+ */
+ private void expand(StringBuilder builder,
+ String path,
+ int indent) {
+ try {
+ GetChildrenBuilder childrenBuilder = curator.getChildren();
+ List<String> children = childrenBuilder.forPath(path);
+ for (String child : children) {
+ String childPath = path + "/" + child;
+ String body;
+ Stat stat = curator.checkExists().forPath(childPath);
+ StringBuilder bodyBuilder = new StringBuilder(256);
+ bodyBuilder.append(" [")
+ .append(stat.getDataLength())
+ .append("]");
+ if (stat.getEphemeralOwner() > 0) {
+ bodyBuilder.append("*");
+ }
+ if (verbose) {
+ // verbose: extract ACLs and append them to this child's body,
+ // so that they print on the same line as the child entry
+ bodyBuilder.append(" -- ");
+ List<ACL> acls =
+ curator.getACL().forPath(childPath);
+ for (ACL acl : acls) {
+ bodyBuilder.append(RegistrySecurity.aclToString(acl));
+ bodyBuilder.append(" ");
+ }
+ }
+ body = bodyBuilder.toString();
+ // print each child
+ append(builder, indent, ' ');
+ builder.append('/').append(child);
+ builder.append(body);
+ builder.append('\n');
+ // recurse
+ expand(builder, childPath, indent + INDENT);
+ }
+ } catch (Exception e) {
+ builder.append(e.toString()).append("\n");
+ }
+ }
+
+ /**
+ * Append the specified indentation to a builder
+ * @param builder string builder to append to
+ * @param indent current indentation
+ * @param c character to use for indentation
+ */
+ private void append(StringBuilder builder, int indent, char c) {
+ for (int i = 0; i < indent; i++) {
+ builder.append(c);
+ }
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZookeeperConfigOptions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZookeeperConfigOptions.java
new file mode 100644
index 0000000000..711e27c9c1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZookeeperConfigOptions.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.impl.zk;
+
+import org.apache.zookeeper.client.ZooKeeperSaslClient;
+import org.apache.zookeeper.server.ZooKeeperSaslServer;
+
+/**
+ * Configuration options which are internal to Zookeeper,
+ * as well as some other ZK constants
+ *
+ * Zookeeper options are passed via system properties prior to the ZK
+ * methods/classes being invoked. This implies that:
+ * <ol>
+ *   <li>There can only be one instance of a ZK client or service class
+ *   in a single JVM, else their configuration options will conflict.</li>
+ *   <li>It is safest to set these properties immediately before
+ *   invoking ZK operations.</li>
+ * </ol>
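+ *
+ * A sketch honoring both caveats (constant names from this interface,
+ * values illustrative only):
+ * <pre>
+ *   System.setProperty(PROP_ZK_ENABLE_SASL_CLIENT, "true");
+ *   System.setProperty(PROP_ZK_SASL_CLIENT_USERNAME, "user");
+ *   // ...now instantiate the ZK/Curator client
+ * </pre>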
+ */
+public interface ZookeeperConfigOptions {
+ /**
+ * Enable SASL secure clients: {@value}.
+ * This is usually set to true, with the ZK server configured, via the
+ * property
+ * {@link #PROP_ZK_SERVER_MAINTAIN_CONNECTION_DESPITE_SASL_FAILURE},
+ * to fall back to non-SASL authentication if the SASL auth fails.
+ *
+ * As a result, clients will default to attempting SASL-authentication,
+ * but revert to classic authentication/anonymous access on failure.
+ */
+ String PROP_ZK_ENABLE_SASL_CLIENT =
+ ZooKeeperSaslClient.ENABLE_CLIENT_SASL_KEY;
+
+ /**
+ * Default flag for the ZK client: {@value}.
+ */
+ String DEFAULT_ZK_ENABLE_SASL_CLIENT = "true";
+
+ /**
+ * System property for the JAAS client context : {@value}.
+ *
+ * For SASL authentication to work, this must point to a
+ * context within the JVM's JAAS configuration file.
+ *
+ * Default value is derived from
+ * {@link ZooKeeperSaslClient#LOGIN_CONTEXT_NAME_KEY}
+ */
+ String PROP_ZK_SASL_CLIENT_CONTEXT =
+ ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY;
+
+ /**
+ * The SASL client username: {@value}.
+ *
+ * Set this to the short name of the client, e.g. "user",
+ * not user/host, or user/host@REALM
+ */
+ String PROP_ZK_SASL_CLIENT_USERNAME = "zookeeper.sasl.client.username";
+
+ /**
+ * The SASL Server context, referring to a context in the JVM's
+ * JAAS context file: {@value}
+ *
+ */
+ String PROP_ZK_SERVER_SASL_CONTEXT =
+ ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY;
+
+ /**
+ * Should ZK Server allow failed SASL clients to downgrade to classic
+ * authentication on a SASL auth failure: {@value}.
+ */
+ String PROP_ZK_SERVER_MAINTAIN_CONNECTION_DESPITE_SASL_FAILURE =
+ "zookeeper.maintain_connection_despite_sasl_failure";
+
+ /**
+ * Should the ZK server allow failed SASL clients: {@value}.
+ */
+ String PROP_ZK_ALLOW_FAILED_SASL_CLIENTS =
+ "zookeeper.allowSaslFailedClients";
+
+ /**
+ * Kerberos realm of the server: {@value}.
+ */
+ String PROP_ZK_SERVER_REALM = "zookeeper.server.realm";
+
+ /**
+ * Path to a kinit binary: {@value}.
+ * Defaults to "/usr/bin/kinit"
+ */
+ String PROP_ZK_KINIT_PATH = "zookeeper.kinit";
+
+ /**
+ * ID scheme for SASL: {@value}.
+ */
+ String SCHEME_SASL = "sasl";
+
+ /**
+ * ID scheme for digest auth: {@value}.
+ */
+ String SCHEME_DIGEST = "digest";
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/package-info.java
new file mode 100644
index 0000000000..f7ae98372d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/package-info.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Core Zookeeper support.
+ *
+ * This package contains the low-level bindings to Curator and Zookeeper,
+ * including everything related to registry security.
+ *
+ * The class {@link org.apache.hadoop.registry.client.impl.zk.CuratorService}
+ * is a YARN service which offers access to a Zookeeper instance via
+ * Apache Curator.
+ *
+ * The {@link org.apache.hadoop.registry.client.impl.zk.RegistrySecurity}
+ * class implements the security support in the registry, through a set of
+ * static methods and as a YARN service.
+ *
+ * To work with ZK, system properties need to be set before invoking
+ * some operations/instantiating some objects. The definitions of these
+ * are kept in {@link org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions}.
+ *
+ *
+ */
+package org.apache.hadoop.registry.client.impl.zk;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/AddressTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/AddressTypes.java
new file mode 100644
index 0000000000..192819c8d7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/AddressTypes.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.types;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * The set of address types, as string constants.
+ * Why strings and not enums? Cross-platform serialization as JSON
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface AddressTypes {
+
+ /**
+ * hostname/FQDN and port pair: {@value}.
+ * The host/domain name and port are set as separate strings in the address
+ * list, e.g.
+ * <pre>
+ *   ["namenode.example.org", "50070"]
+ * </pre>
+ */
+ public static final String ADDRESS_HOSTNAME_AND_PORT = "host/port";
+
+
+ /**
+ * Path /a/b/c style: {@value}.
+ * The entire path is encoded in a single entry, e.g.
+ * <pre>
+ *   ["/users/example/dataset"]
+ * </pre>
+ */
+ public static final String ADDRESS_PATH = "path";
+
+
+
+ /**
+ * URI entries: {@value}.
+ * <pre>
+ *   ["http://example.org"]
+ * </pre>
+ */
+ public static final String ADDRESS_URI = "uri";
+
+ /**
+ * Zookeeper addresses as a triple : {@value}.
+ *
+ * These are provided as a 3-element tuple of hostname, port,
+ * and optionally a path (depending on the application).
+ *
+ * A single element would be
+ * <pre>
+ *   ["zk1","2181","/registry"]
+ * </pre>
+ * An endpoint with multiple elements would list them as
+ * <pre>
+ *   [["zk1","2181","/registry"], ["zk2","2181","/registry"]]
+ * </pre>
+ * (values illustrative). The third element in each entry, the path,
+ * MUST be the same in every entry: a client reading the addresses of
+ * an endpoint is free to pick any of the set.
+ *
+ */
+ public static final String ADDRESS_ZOOKEEPER = "zktriple";
+
+ /**
+ * Any other address: {@value}.
+ */
+ public static final String ADDRESS_OTHER = "";
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/Endpoint.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/Endpoint.java
new file mode 100644
index 0000000000..51418d9c9e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/Endpoint.java
@@ -0,0 +1,190 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.types;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Description of a single service/component endpoint.
+ * It is designed to be marshalled as JSON.
+ *
+ * Every endpoint can have more than one address entry, such as
+ * a list of URLs to a replicated service, or a (hostname, port)
+ * pair. Each of these address entries is represented as a string list,
+ * as that is the only tuple form that JSON can reliably represent.
+ *
+ *
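+ * As an illustration (all values hypothetical), a web UI endpoint with a
+ * single URI address could marshal as:
+ * <pre>
+ *   {
+ *     "api" : "UI",
+ *     "addressType" : "uri",
+ *     "protocolType" : "webui",
+ *     "addresses" : [ [ "http://example.org:8080" ] ]
+ *   }
+ * </pre>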
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
+public final class Endpoint implements Cloneable {
+
+ /**
+ * API implemented at the end of the binding
+ */
+ public String api;
+
+ /**
+ * Type of address. The standard types are defined in
+ * {@link AddressTypes}
+ */
+ public String addressType;
+
+ /**
+ * Protocol type. Some standard types are defined in
+ * {@link ProtocolTypes}
+ */
+ public String protocolType;
+
+ /**
+ * A list of address tuples, whose format depends on the address type
+ */
+ public List<List<String>> addresses;
+
+ /**
+ * Create an empty instance.
+ */
+ public Endpoint() {
+ }
+
+ /**
+ * Create an endpoint from another endpoint.
+ * This is a deep clone with a new list of addresses.
+ * @param that the endpoint to copy from
+ */
+ public Endpoint(Endpoint that) {
+ this.api = that.api;
+ this.addressType = that.addressType;
+ this.protocolType = that.protocolType;
+ this.addresses = new ArrayList<List<String>>(that.addresses.size());
+ // deep-copy each address tuple from the source endpoint
+ for (List<String> address : that.addresses) {
+ List<String> addr2 = new ArrayList<String>(address.size());
+ addr2.addAll(address);
+ this.addresses.add(addr2);
+ }
+ }
+
+ /**
+ * Build an endpoint with a list of addresses
+ * @param api API name
+ * @param addressType address type
+ * @param protocolType protocol type
+ * @param addrs addresses
+ */
+ public Endpoint(String api,
+ String addressType,
+ String protocolType,
+ List<List<String>> addrs) {
+ this.api = api;
+ this.addressType = addressType;
+ this.protocolType = protocolType;
+ this.addresses = new ArrayList<List<String>>();
+ if (addrs != null) {
+ addresses.addAll(addrs);
+ }
+ }
+
+ /**
+ * Build an endpoint from a list of URIs; each URI
+ * is ASCII-encoded and added to the list of addresses.
+ * @param api API name
+ * @param protocolType protocol type
+ * @param uris URIs to convert to a list of tuples
+ */
+ public Endpoint(String api,
+ String protocolType,
+ URI... uris) {
+ this.api = api;
+ this.addressType = AddressTypes.ADDRESS_URI;
+
+ this.protocolType = protocolType;
+ List<List<String>> addrs = new ArrayList<List<String>>(uris.length);
+ for (URI uri : uris) {
+ addrs.add(RegistryTypeUtils.tuple(uri.toString()));
+ }
+ this.addresses = addrs;
+ }
+
+ @Override
+ public String toString() {
+ final StringBuilder sb = new StringBuilder("Endpoint{");
+ sb.append("api='").append(api).append('\'');
+ sb.append(", addressType='").append(addressType).append('\'');
+ sb.append(", protocolType='").append(protocolType).append('\'');
+
+ sb.append(", addresses=");
+ if (addresses != null) {
+ sb.append("[ ");
+ for (List<String> address : addresses) {
+ sb.append("[ ");
+ if (address == null) {
+ sb.append("NULL entry in address list");
+ } else {
+ for (String elt : address) {
+ sb.append('"').append(elt).append("\" ");
+ }
+ }
+ sb.append("] ");
+ }
+ sb.append("] ");
+ } else {
+ sb.append("(null) ");
+ }
+ sb.append('}');
+ return sb.toString();
+ }
+
+ /**
+ * Validate the record by checking for null fields and other invalid
+ * conditions
+ * @throws NullPointerException if a field is null when it
+ * MUST be set.
+ * @throws RuntimeException on invalid entries
+ */
+ public void validate() {
+ Preconditions.checkNotNull(api, "null API field");
+ Preconditions.checkNotNull(addressType, "null addressType field");
+ Preconditions.checkNotNull(protocolType, "null protocolType field");
+ Preconditions.checkNotNull(addresses, "null addresses field");
+ for (List<String> address : addresses) {
+ Preconditions.checkNotNull(address, "null element in address");
+ }
+ }
+
+ /**
+ * Shallow clone: the lists of addresses are shared
+ * @return a cloned instance
+ * @throws CloneNotSupportedException
+ */
+ @Override
+ protected Object clone() throws CloneNotSupportedException {
+ return super.clone();
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ProtocolTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ProtocolTypes.java
new file mode 100644
index 0000000000..f225cf0877
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ProtocolTypes.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.types;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Some common protocol types.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface ProtocolTypes {
+
+ /**
+ * Addresses are URIs of Hadoop Filesystem paths: {@value}.
+ */
+ String PROTOCOL_FILESYSTEM = "hadoop/filesystem";
+
+ /**
+ * Classic Hadoop IPC : {@value}.
+ */
+ String PROTOCOL_HADOOP_IPC = "hadoop/IPC";
+
+ /**
+ * Hadoop protocol buffers IPC: {@value}.
+ */
+ String PROTOCOL_HADOOP_IPC_PROTOBUF = "hadoop/protobuf";
+
+ /**
+ * Corba IIOP: {@value}.
+ */
+ String PROTOCOL_IIOP = "IIOP";
+
+ /**
+ * REST: {@value}.
+ */
+ String PROTOCOL_REST = "REST";
+
+ /**
+ * Java RMI: {@value}.
+ */
+ String PROTOCOL_RMI = "RMI";
+
+ /**
+ * SunOS RPC, as used by NFS and similar: {@value}.
+ */
+ String PROTOCOL_SUN_RPC = "sunrpc";
+
+ /**
+ * Thrift-based protocols: {@value}.
+ */
+ String PROTOCOL_THRIFT = "thrift";
+
+ /**
+ * Custom TCP protocol: {@value}.
+ */
+ String PROTOCOL_TCP = "tcp";
+
+ /**
+ * Custom UDP-based protocol: {@value}.
+ */
+ String PROTOCOL_UDP = "udp";
+
+ /**
+ * Default value, indicating that the protocol is unknown: "{@value}"
+ */
+ String PROTOCOL_UNKNOWN = "";
+
+ /**
+ * Web page: {@value}.
+ *
+ * This protocol implies that the URLs are designed for
+ * people to view via web browsers.
+ */
+ String PROTOCOL_WEBUI = "webui";
+
+ /**
+ * Web Services: {@value}.
+ */
+ String PROTOCOL_WSAPI = "WS-*";
+
+ /**
+ * A zookeeper binding: {@value}.
+ */
+ String PROTOCOL_ZOOKEEPER_BINDING = "zookeeper";
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/RegistryPathStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/RegistryPathStatus.java
new file mode 100644
index 0000000000..59bcadce37
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/RegistryPathStatus.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.types;
+
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.annotate.JsonProperty;
+
+/**
+ * Output of a RegistryOperations.stat() call
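+ *
+ * It marshals to/from JSON along these lines (values illustrative only):
+ * <pre>
+ *   {"path":"/users/example","time":1414000000000,"size":32,"children":2}
+ * </pre>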
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+@JsonIgnoreProperties(ignoreUnknown = true)
+public final class RegistryPathStatus {
+
+ /**
+ * Short path in the registry to this entry
+ */
+ public final String path;
+
+ /**
+ * Timestamp
+ */
+ public final long time;
+
+ /**
+ * Entry size in bytes, as returned by the storage infrastructure.
+ * In zookeeper, even "empty" nodes have a non-zero size.
+ */
+ public final long size;
+
+ /**
+ * Number of child nodes
+ */
+ public final int children;
+
+ /**
+ * Construct an instance
+ * @param path full path
+ * @param time time
+ * @param size entry size
+ * @param children number of children
+ */
+ public RegistryPathStatus(
+ @JsonProperty("path") String path,
+ @JsonProperty("time") long time,
+ @JsonProperty("size") long size,
+ @JsonProperty("children") int children) {
+ this.path = path;
+ this.time = time;
+ this.size = size;
+ this.children = children;
+ }
+
+ /**
+ * Equality operator checks size, time and path of the entries.
+ * It does not check {@link #children}.
+ * @param other the other entry
+ * @return true if the entries are considered equal.
+ */
+ @Override
+ public boolean equals(Object other) {
+ if (this == other) {
+ return true;
+ }
+ if (other == null || getClass() != other.getClass()) {
+ return false;
+ }
+
+ RegistryPathStatus status = (RegistryPathStatus) other;
+
+ if (size != status.size) {
+ return false;
+ }
+ if (time != status.time) {
+ return false;
+ }
+ if (path != null ? !path.equals(status.path) : status.path != null) {
+ return false;
+ }
+ return true;
+ }
+
+ /**
+ * The hash code is derived from the path.
+ * @return hash code for storing the path in maps.
+ */
+ @Override
+ public int hashCode() {
+ return path != null ? path.hashCode() : 0;
+ }
+
+ @Override
+ public String toString() {
+ final StringBuilder sb =
+ new StringBuilder("RegistryPathStatus{");
+ sb.append("path='").append(path).append('\'');
+ sb.append(", time=").append(time);
+ sb.append(", size=").append(size);
+ sb.append(", children=").append(children);
+ sb.append('}');
+ return sb.toString();
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecord.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecord.java
new file mode 100644
index 0000000000..378127fc02
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecord.java
@@ -0,0 +1,249 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.types;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.codehaus.jackson.annotate.JsonAnyGetter;
+import org.codehaus.jackson.annotate.JsonAnySetter;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * JSON-marshallable description of a single component.
+ * It supports the deserialization of unknown attributes, but does
+ * not support their creation.
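+ *
+ * A minimal marshalled record might look like this (values hypothetical):
+ * <pre>
+ *   {
+ *     "description" : "example service",
+ *     "external" : [ ],
+ *     "internal" : [ ]
+ *   }
+ * </pre>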
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
+public class ServiceRecord implements Cloneable {
+
+ /**
+ * Description string
+ */
+ public String description;
+
+ /**
+ * map to handle unknown attributes.
+ */
+ private Map<String, String> attributes = new HashMap<String, String>(4);
+
+ /**
+ * List of endpoints intended for use by external callers
+ */
+ public List<Endpoint> external = new ArrayList<Endpoint>();
+
+ /**
+ * List of endpoints for use within an application.
+ */
+ public List<Endpoint> internal = new ArrayList<Endpoint>();
+
+ /**
+ * Create a service record with no ID, description or registration time.
+ * Endpoint lists are set to empty lists.
+ */
+ public ServiceRecord() {
+ }
+
+ /**
+ * Deep cloning constructor
+ * @param that service record source
+ */
+ public ServiceRecord(ServiceRecord that) {
+ this.description = that.description;
+ // others
+ Map<String, String> thatAttrs = that.attributes;
+ for (Map.Entry<String, String> entry : thatAttrs.entrySet()) {
+ attributes.put(entry.getKey(), entry.getValue());
+ }
+ // endpoints
+ List<Endpoint> src = that.internal;
+ if (src != null) {
+ internal = new ArrayList<Endpoint>(src.size());
+ for (Endpoint endpoint : src) {
+ internal.add(new Endpoint(endpoint));
+ }
+ }
+ src = that.external;
+ if (src != null) {
+ external = new ArrayList<Endpoint>(src.size());
+ for (Endpoint endpoint : src) {
+ external.add(new Endpoint(endpoint));
+ }
+ }
+ }
+
+ /**
+ * Add an external endpoint
+ * @param endpoint endpoint to set
+ */
+ public void addExternalEndpoint(Endpoint endpoint) {
+ Preconditions.checkArgument(endpoint != null);
+ endpoint.validate();
+ external.add(endpoint);
+ }
+
+ /**
+ * Add an internal endpoint
+ * @param endpoint endpoint to set
+ */
+ public void addInternalEndpoint(Endpoint endpoint) {
+ Preconditions.checkArgument(endpoint != null);
+ endpoint.validate();
+ internal.add(endpoint);
+ }
+
+ /**
+ * Look up an internal endpoint
+ * @param api API
+ * @return the endpoint or null if there was no match
+ */
+ public Endpoint getInternalEndpoint(String api) {
+ return findByAPI(internal, api);
+ }
+
+ /**
+ * Look up an external endpoint
+ * @param api API
+ * @return the endpoint or null if there was no match
+ */
+ public Endpoint getExternalEndpoint(String api) {
+ return findByAPI(external, api);
+ }
+
+ /**
+ * Handle unknown attributes by storing them in the
+ * {@link #attributes} map
+ * @param key attribute name
+ * @param value attribute value.
+ */
+ @JsonAnySetter
+ public void set(String key, Object value) {
+ attributes.put(key, value.toString());
+ }
+
+ /**
+ * The map of "other" attributes set when parsing. These
+ * are not included in the JSON value of this record when it
+ * is generated.
+ * @return a map of any unknown attributes in the deserialized JSON.
+ */
+ @JsonAnyGetter
+ public Map<String, String> attributes() {
+ return attributes;
+ }
+
+ /**
+ * Get the "other" attribute with a specific key
+ * @param key key to look up
+ * @return the value or null
+ */
+ public String get(String key) {
+ return attributes.get(key);
+ }
+
+ /**
+ * Get the "other" attribute with a specific key.
+ * @param key key to look up
+ * @param defVal default value
+ * @return the value as a string,
+ * or defVal if the value was not present
+ */
+ public String get(String key, String defVal) {
+ String val = attributes.get(key);
+ return val != null ? val : defVal;
+ }
+
+ /**
+ * Find an endpoint by its API
+ * @param list list
+ * @param api api name
+ * @return the endpoint or null if there was no match
+ */
+ private Endpoint findByAPI(List<Endpoint> list, String api) {
+ for (Endpoint endpoint : list) {
+ if (endpoint.api.equals(api)) {
+ return endpoint;
+ }
+ }
+ return null;
+ }
+
+ @Override
+ public String toString() {
+ final StringBuilder sb =
+ new StringBuilder("ServiceRecord{");
+ sb.append("description='").append(description).append('\'');
+ sb.append("; external endpoints: {");
+ for (Endpoint endpoint : external) {
+ sb.append(endpoint).append("; ");
+ }
+ sb.append("}; internal endpoints: {");
+ for (Endpoint endpoint : internal) {
+ sb.append(endpoint != null ? endpoint.toString() : "NULL ENDPOINT");
+ sb.append("; ");
+ }
+ sb.append('}');
+
+ // the attributes block is printed even when the map is empty
+ sb.append(", attributes: {");
+ for (Map.Entry<String, String> attr : attributes.entrySet()) {
+ sb.append("\"").append(attr.getKey()).append("\"=\"")
+ .append(attr.getValue()).append("\" ");
+ }
+ sb.append('}');
+
+ sb.append('}');
+ return sb.toString();
+ }
+
+ /**
+ * Shallow clone: all endpoints will be shared across instances
+ * @return a clone of the instance
+ * @throws CloneNotSupportedException
+ */
+ @Override
+ protected Object clone() throws CloneNotSupportedException {
+ return super.clone();
+ }
+
+ /**
+ * Validate the record by checking for null fields and other invalid
+ * conditions
+ * @throws NullPointerException if a field is null when it
+ * MUST be set.
+ * @throws RuntimeException on invalid entries
+ */
+ public void validate() {
+ for (Endpoint endpoint : external) {
+ Preconditions.checkNotNull(endpoint, "null endpoint");
+ endpoint.validate();
+ }
+ for (Endpoint endpoint : internal) {
+ Preconditions.checkNotNull(endpoint, "null endpoint");
+ endpoint.validate();
+ }
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecordHeader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecordHeader.java
new file mode 100644
index 0000000000..2f75dba5a3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecordHeader.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.types;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Service record header; access to the byte array kept private
+ * to avoid findbugs warnings of mutability
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class ServiceRecordHeader {
+ /**
+ * Header of a service record: "jsonservicerec"
+ * By making this over 12 bytes long, we can auto-determine which entries
+ * in a listing are too short to contain a record without getting their data
+ */
+ private static final byte[] RECORD_HEADER = {
+ 'j', 's', 'o', 'n',
+ 's', 'e', 'r', 'v', 'i', 'c', 'e',
+ 'r', 'e', 'c'
+ };
+
+ /**
+ * Get the length of the record header
+ * @return the header length
+ */
+ public static int getLength() {
+ return RECORD_HEADER.length;
+ }
+
+ /**
+ * Get a clone of the record header
+ * @return the new record header.
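+ *
+ * A sketch of how a caller might test an entry's bytes (the
+ * {@code data} array here is hypothetical):
+ * <pre>
+ *   byte[] hdr = ServiceRecordHeader.getData();
+ *   boolean possible = data.length >= hdr.length; // then compare prefixes
+ * </pre>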
+ */
+ public static byte[] getData() {
+ byte[] h = new byte[RECORD_HEADER.length];
+ System.arraycopy(RECORD_HEADER, 0, h, 0, RECORD_HEADER.length);
+ return h;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/package-info.java
new file mode 100644
index 0000000000..1c926be00b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/package-info.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package contains all the data types which can be saved to the registry
+ * and/or marshalled to and from JSON.
+ *
+ * The core datatypes, {@link org.apache.hadoop.registry.client.types.ServiceRecord},
+ * and {@link org.apache.hadoop.registry.client.types.Endpoint} are
+ * what is used to describe services and their protocol endpoints in the registry.
+ *
+ * Some adjacent interfaces exist to list attributes of the fields:
+ * <ul>
+ * <li>{@link org.apache.hadoop.registry.client.types.AddressTypes}</li>
+ * <li>{@link org.apache.hadoop.registry.client.types.ProtocolTypes}</li>
+ * </ul>
+ * The {@link org.apache.hadoop.registry.client.types.RegistryPathStatus}
+ * class is not saved to the registry —it is the status of a registry
+ * entry that can be retrieved from the API call. It is still
+ * designed to be marshalled to and from JSON, as it can be served up
+ * from REST front ends to the registry.
+ *
+ */
+package org.apache.hadoop.registry.client.types;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/yarn/PersistencePolicies.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/yarn/PersistencePolicies.java
new file mode 100644
index 0000000000..e4c7272db6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/yarn/PersistencePolicies.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.types.yarn;
+
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+
+/**
+ * Persistence policies for {@link ServiceRecord}
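+ *
+ * A policy is stored as the record attribute
+ * {@link YarnRegistryAttributes#YARN_PERSISTENCE}; a sketch of tagging a
+ * record for container-lifetime persistence (the {@code containerId}
+ * variable is hypothetical):
+ * <pre>
+ *   record.set(YarnRegistryAttributes.YARN_PERSISTENCE,
+ *       PersistencePolicies.CONTAINER);
+ *   record.set(YarnRegistryAttributes.YARN_ID, containerId.toString());
+ * </pre>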
+ */
+
+public interface PersistencePolicies {
+
+ /**
+ * The record persists until removed manually: {@value}.
+ */
+ String PERMANENT = "permanent";
+
+ /**
+ * Remove when the YARN application defined in the id field
+ * terminates: {@value}.
+ */
+ String APPLICATION = "application";
+
+ /**
+ * Remove when the current YARN application attempt ID finishes: {@value}.
+ */
+ String APPLICATION_ATTEMPT = "application-attempt";
+
+ /**
+ * Remove when the YARN container in the ID field finishes: {@value}
+ */
+ String CONTAINER = "container";
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/yarn/YarnRegistryAttributes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/yarn/YarnRegistryAttributes.java
new file mode 100644
index 0000000000..7b78932452
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/yarn/YarnRegistryAttributes.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.types.yarn;
+
+/**
+ * YARN-specific attributes in the registry
+ */
+public class YarnRegistryAttributes {
+
+ /**
+ * ID of the entity: for containers, the container ID; for
+ * application instances, the application ID.
+ */
+ public static final String YARN_ID = "yarn:id";
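+
+ /**
+ * Persistence policy of the record: one of the
+ * {@link PersistencePolicies} values.
+ */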
+ public static final String YARN_PERSISTENCE = "yarn:persistence";
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/RMRegistryOperationsService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/RMRegistryOperationsService.java
new file mode 100644
index 0000000000..e11890f85c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/RMRegistryOperationsService.java
@@ -0,0 +1,246 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.server.integration;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.curator.framework.api.BackgroundCallback;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.registry.client.impl.zk.RegistryBindingSource;
+import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
+import org.apache.hadoop.registry.server.services.DeleteCompletionCallback;
+import org.apache.hadoop.registry.server.services.RegistryAdminService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.concurrent.Future;
+
+/**
+ * Handle RM events by updating the registry
+ *
+ * These actions are all implemented as event handlers to operations
+ * which come from the RM.
+ *
+ * This service is expected to be executed by a user with the permissions
+ * to manipulate the entire registry.
+ */
+@InterfaceAudience.LimitedPrivate("YARN")
+@InterfaceStability.Evolving
+public class RMRegistryOperationsService extends RegistryAdminService {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(RMRegistryOperationsService.class);
+
+ private PurgePolicy purgeOnCompletionPolicy = PurgePolicy.PurgeAll;
+
+ public RMRegistryOperationsService(String name) {
+ this(name, null);
+ }
+
+ public RMRegistryOperationsService(String name,
+ RegistryBindingSource bindingSource) {
+ super(name, bindingSource);
+ }
+
+
+ /**
+ * Extend the parent service initialization by verifying that, in a
+ * secure cluster, the service knows the realm in which it is executing.
+ * It needs this to properly build up the user names and hence their
+ * access rights.
+ *
+ * @param conf configuration of the service
+ * @throws Exception
+ */
+ @Override
+ protected void serviceInit(Configuration conf) throws Exception {
+ super.serviceInit(conf);
+
+ verifyRealmValidity();
+ }
+
+ public PurgePolicy getPurgeOnCompletionPolicy() {
+ return purgeOnCompletionPolicy;
+ }
+
+ public void setPurgeOnCompletionPolicy(PurgePolicy purgeOnCompletionPolicy) {
+ this.purgeOnCompletionPolicy = purgeOnCompletionPolicy;
+ }
+
+ public void onApplicationAttemptRegistered(ApplicationAttemptId attemptId,
+ String host, int rpcport, String trackingurl) throws IOException {
+
+ }
+
+ public void onApplicationLaunched(ApplicationId id) throws IOException {
+
+ }
+
+ /**
+ * Actions to take as an AM registers itself with the RM.
+ * @param attemptId attempt ID
+ * @throws IOException problems
+ */
+ public void onApplicationMasterRegistered(ApplicationAttemptId attemptId) throws
+ IOException {
+ }
+
+ /**
+ * Actions to take when the AM container is completed
+ * @param containerId container ID
+ * @throws IOException problems
+ */
+ public void onAMContainerFinished(ContainerId containerId) throws
+ IOException {
+ LOG.info("AM Container {} finished, purging application attempt records",
+ containerId);
+
+ // remove all application attempt entries
+ purgeAppAttemptRecords(containerId.getApplicationAttemptId());
+
+ // also treat as a container finish to remove container
+ // level records for the AM container
+ onContainerFinished(containerId);
+ }
+
+ /**
+ * Remove all application attempt entries.
+ * @param attemptId attempt ID
+ */
+ protected void purgeAppAttemptRecords(ApplicationAttemptId attemptId) {
+ purgeRecordsAsync("/",
+ attemptId.toString(),
+ PersistencePolicies.APPLICATION_ATTEMPT);
+ }
+
+ /**
+ * Actions to take when an application attempt is completed
+ * @param attemptId application ID
+ * @throws IOException problems
+ */
+ public void onApplicationAttemptUnregistered(ApplicationAttemptId attemptId)
+ throws IOException {
+ LOG.info("Application attempt {} unregistered, purging app attempt records",
+ attemptId);
+ purgeAppAttemptRecords(attemptId);
+ }
+
+ /**
+ * Actions to take when an application is completed
+ * @param id application ID
+ * @throws IOException problems
+ */
+ public void onApplicationCompleted(ApplicationId id)
+ throws IOException {
+ LOG.info("Application {} completed, purging application-level records",
+ id);
+ purgeRecordsAsync("/",
+ id.toString(),
+ PersistencePolicies.APPLICATION);
+ }
+
+ public void onApplicationAttemptAdded(ApplicationAttemptId appAttemptId) {
+ }
+
+ /**
+ * This is the event where the user is known, so the user directory
+ * can be created
+ * @param applicationId application ID
+ * @param user username
+ * @throws IOException problems
+ */
+ public void onStateStoreEvent(ApplicationId applicationId, String user) throws
+ IOException {
+ initUserRegistryAsync(user);
+ }
+
+ /**
+ * Actions to take when a container is completed
+ * @param id container ID
+ * @throws IOException problems
+ */
+ public void onContainerFinished(ContainerId id) throws IOException {
+ LOG.info("Container {} finished, purging container-level records",
+ id);
+ purgeRecordsAsync("/",
+ id.toString(),
+ PersistencePolicies.CONTAINER);
+ }
+
+ /**
+ * Queue an async operation to purge all matching records under a base path.
+ * <ol>
+ *   <li>Uses a depth first search.</li>
+ *   <li>A match is on both the ID and the persistence policy.</li>
+ *   <li>If a record matches then it is deleted without any child
+ *   searches.</li>
+ *   <li>Deletions will be asynchronous if a callback is provided.</li>
+ * </ol>
+ *
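+ * For example, purging the records of a finished container (a sketch
+ * mirroring {@link #onContainerFinished(ContainerId)}):
+ * <pre>
+ *   purgeRecordsAsync("/", id.toString(), PersistencePolicies.CONTAINER);
+ * </pre>
+ *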
+ * @param path base path
+ * @param id ID for service record.id
+ * @param persistencePolicyMatch ID for the persistence policy to match:
+ * no match, no delete.
+ * @return a future that returns the #of records deleted
+ */
+ @VisibleForTesting
+ public Future<Integer> purgeRecordsAsync(String path,
+ String id,
+ String persistencePolicyMatch) {
+
+ return purgeRecordsAsync(path,
+ id, persistencePolicyMatch,
+ purgeOnCompletionPolicy,
+ new DeleteCompletionCallback());
+ }
+
+ /**
+ * Queue an async operation to purge all matching records under a base path.
+ *
+ * <ol>
+ *   <li>Uses a depth first search.</li>
+ *   <li>A match is on both the ID and the persistence policy.</li>
+ *   <li>If a record matches then it is deleted without any child
+ *   searches.</li>
+ *   <li>Deletions will be asynchronous if a callback is provided.</li>
+ * </ol>
+ *
+ * @param path base path
+ * @param id ID for service record.id
+ * @param persistencePolicyMatch ID for the persistence policy to match:
+ * no match, no delete.
+ * @param purgePolicy how to react to children under the entry
+ * @param callback an optional callback
+ * @return a future that returns the #of records deleted
+ */
+ @VisibleForTesting
+ public Future<Integer> purgeRecordsAsync(String path,
+ String id,
+ String persistencePolicyMatch,
+ PurgePolicy purgePolicy,
+ BackgroundCallback callback) {
+ LOG.info(" records under {} with ID {} and policy {}: {}",
+ path, id, persistencePolicyMatch);
+ return submit(
+ new AsyncPurge(path,
+ new SelectByYarnPersistence(id, persistencePolicyMatch),
+ purgePolicy,
+ callback));
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/SelectByYarnPersistence.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/SelectByYarnPersistence.java
new file mode 100644
index 0000000000..004be86064
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/SelectByYarnPersistence.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.server.integration;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.registry.client.types.RegistryPathStatus;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes;
+import org.apache.hadoop.registry.server.services.RegistryAdminService;
+
+/**
+ * Select an entry by the YARN persistence policy
+ */
+public class SelectByYarnPersistence
+ implements RegistryAdminService.NodeSelector {
+ private final String id;
+ private final String targetPolicy;
+
+ public SelectByYarnPersistence(String id, String targetPolicy) {
+ Preconditions.checkArgument(!StringUtils.isEmpty(id), "id");
+ Preconditions.checkArgument(!StringUtils.isEmpty(targetPolicy),
+ "targetPolicy");
+ this.id = id;
+ this.targetPolicy = targetPolicy;
+ }
+
+ @Override
+ public boolean shouldSelect(String path,
+ RegistryPathStatus registryPathStatus,
+ ServiceRecord serviceRecord) {
+ String policy =
+ serviceRecord.get(YarnRegistryAttributes.YARN_PERSISTENCE, "");
+ return id.equals(serviceRecord.get(YarnRegistryAttributes.YARN_ID, ""))
+ && (targetPolicy.equals(policy));
+ }
+
+ @Override
+ public String toString() {
+ return String.format(
+ "Select by ID %s and policy %s",
+ id, targetPolicy);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/package-info.java
new file mode 100644
index 0000000000..22d8bc5cff
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package contains the classes which integrate with the YARN resource
+ * manager.
+ */
+package org.apache.hadoop.registry.server.integration;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/package-info.java
new file mode 100644
index 0000000000..6962eb85e7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/package-info.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Server-side classes for the registry
+ *
+ * These are components intended to be deployed only on servers or in test
+ * JVMs, rather than on client machines.
+ *
+ * Example components are: server-side ZK support, a REST service, etc.
+ */
+package org.apache.hadoop.registry.server;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/AddingCompositeService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/AddingCompositeService.java
new file mode 100644
index 0000000000..9faede49dc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/AddingCompositeService.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.server.services;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.service.CompositeService;
+import org.apache.hadoop.service.Service;
+
+/**
+ * Composite service that exports the add/remove methods.
+ *
+ * This allows external classes to add and remove services through these
+ * methods; once added, a service follows the lifecycle of its parent.
+ *
+ * It is essential that any service added is in a state from which it can
+ * be moved on with that of the parent service. Specifically, do not add
+ * an uninited service to a parent that is already inited, as the start
+ * operation will then fail.
+ *
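+ * A minimal usage sketch (the child service chosen here is illustrative):
+ * <pre>
+ * AddingCompositeService composite =
+ *     new AddingCompositeService("composite");
+ * // children must be added before the parent is inited
+ * composite.addService(new MicroZookeeperService("zk"));
+ * composite.init(new Configuration());
+ * composite.start();
+ * </pre>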
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class AddingCompositeService extends CompositeService {
+
+
+ public AddingCompositeService(String name) {
+ super(name);
+ }
+
+ @Override
+ public void addService(Service service) {
+ super.addService(service);
+ }
+
+ @Override
+ public boolean removeService(Service service) {
+ return super.removeService(service);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/DeleteCompletionCallback.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/DeleteCompletionCallback.java
new file mode 100644
index 0000000000..e160d4a1ff
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/DeleteCompletionCallback.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.server.services;
+
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.api.BackgroundCallback;
+import org.apache.curator.framework.api.CuratorEvent;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Curator callback for delete operations completing.
+ *
+ * This callback logs at debug and increments the event counter.
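+ *
+ * A sketch of wiring the callback into an asynchronous purge (the
+ * admin service and selector are illustrative):
+ * <pre>
+ * DeleteCompletionCallback callback = new DeleteCompletionCallback();
+ * adminService.purge("/users/example", selector,
+ *     RegistryAdminService.PurgePolicy.PurgeAll, callback);
+ * // the counter is updated as background deletions complete
+ * int deletes = callback.getEventCount();
+ * </pre>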
+ */
+public class DeleteCompletionCallback implements BackgroundCallback {
+ private static final Logger LOG =
+      LoggerFactory.getLogger(DeleteCompletionCallback.class);
+
+ private AtomicInteger events = new AtomicInteger(0);
+
+ @Override
+ public void processResult(CuratorFramework client,
+ CuratorEvent event) throws
+ Exception {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Delete event {}", event);
+ }
+ events.incrementAndGet();
+ }
+
+ /**
+ * Get the number of deletion events
+ * @return the count of events
+ */
+ public int getEventCount() {
+ return events.get();
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperService.java
new file mode 100644
index 0000000000..3fa0c1920d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperService.java
@@ -0,0 +1,282 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.server.services;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.lang.StringUtils;
+import org.apache.curator.ensemble.fixed.FixedEnsembleProvider;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.registry.client.api.RegistryConstants;
+import org.apache.hadoop.registry.client.impl.zk.BindingInformation;
+import org.apache.hadoop.registry.client.impl.zk.RegistryBindingSource;
+import org.apache.hadoop.registry.client.impl.zk.RegistryInternalConstants;
+import org.apache.hadoop.registry.client.impl.zk.RegistrySecurity;
+import org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions;
+import org.apache.zookeeper.server.ServerCnxnFactory;
+import org.apache.zookeeper.server.ZooKeeperServer;
+import org.apache.zookeeper.server.persistence.FileTxnSnapLog;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+
+/**
+ * This is a small, localhost Zookeeper service instance that is contained
+ * in a YARN service; it has been derived from Apache Twill.
+ *
+ * It implements {@link RegistryBindingSource} and provides binding information
+ * once started. Until start() is called, the hostname and
+ * port may be undefined; in that state the accessor methods raise
+ * an exception.
+ *
+ * If you wish to chain together a registry service with this one under
+ * the same CompositeService, this service must be added
+ * as a child first.
+ *
+ * It also sets the configuration parameter
+ * {@link RegistryConstants#KEY_REGISTRY_ZK_QUORUM}
+ * to its connection string. Any code with access to the service configuration
+ * can view it.
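+ *
+ * A minimal usage sketch:
+ * <pre>
+ * MicroZookeeperService zk = new MicroZookeeperService("test-zk");
+ * zk.init(new Configuration());
+ * zk.start();
+ * // valid only after start()
+ * String quorum = zk.getConnectionString();
+ * </pre>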
+ */
+@InterfaceStability.Evolving
+public class MicroZookeeperService
+ extends AbstractService
+ implements RegistryBindingSource, RegistryConstants,
+ ZookeeperConfigOptions,
+    MicroZookeeperServiceKeys {
+
+
+ private static final Logger
+ LOG = LoggerFactory.getLogger(MicroZookeeperService.class);
+
+ private File instanceDir;
+ private File dataDir;
+ private int tickTime;
+ private int port;
+ private String host;
+ private boolean secureServer;
+
+ private ServerCnxnFactory factory;
+ private BindingInformation binding;
+ private File confDir;
+ private StringBuilder diagnostics = new StringBuilder();
+
+ /**
+ * Create an instance
+ * @param name service name
+ */
+ public MicroZookeeperService(String name) {
+ super(name);
+ }
+
+ /**
+ * Get the connection string.
+ * @return the string
+ * @throws IllegalStateException if the connection is not yet valid
+ */
+ public String getConnectionString() {
+ Preconditions.checkState(factory != null, "service not started");
+ InetSocketAddress addr = factory.getLocalAddress();
+ return String.format("%s:%d", addr.getHostName(), addr.getPort());
+ }
+
+ /**
+ * Get the connection address
+ * @return the connection as an address
+ * @throws IllegalStateException if the connection is not yet valid
+ */
+ public InetSocketAddress getConnectionAddress() {
+ Preconditions.checkState(factory != null, "service not started");
+ return factory.getLocalAddress();
+ }
+
+ /**
+ * Create an inet socket addr from the local host + port number
+ * @param port port to use
+ * @return a (hostname, port) pair
+ * @throws UnknownHostException if the server cannot resolve the host
+ */
+ private InetSocketAddress getAddress(int port) throws UnknownHostException {
+ return new InetSocketAddress(host, port < 0 ? 0 : port);
+ }
+
+ /**
+ * Initialize the service, including choosing a path for the data
+ * @param conf configuration
+ * @throws Exception
+ */
+ @Override
+ protected void serviceInit(Configuration conf) throws Exception {
+ port = conf.getInt(KEY_ZKSERVICE_PORT, 0);
+ tickTime = conf.getInt(KEY_ZKSERVICE_TICK_TIME,
+ ZooKeeperServer.DEFAULT_TICK_TIME);
+ String instancedirname = conf.getTrimmed(
+ KEY_ZKSERVICE_DIR, "");
+ host = conf.getTrimmed(KEY_ZKSERVICE_HOST, DEFAULT_ZKSERVICE_HOST);
+ if (instancedirname.isEmpty()) {
+ File testdir = new File(System.getProperty("test.dir", "target"));
+ instanceDir = new File(testdir, "zookeeper" + getName());
+ } else {
+ instanceDir = new File(instancedirname);
+ FileUtil.fullyDelete(instanceDir);
+ }
+ LOG.debug("Instance directory is {}", instanceDir);
+ mkdirStrict(instanceDir);
+ dataDir = new File(instanceDir, "data");
+ confDir = new File(instanceDir, "conf");
+ mkdirStrict(dataDir);
+ mkdirStrict(confDir);
+ super.serviceInit(conf);
+ }
+
+ /**
+ * Create a directory, ignoring if the dir is already there,
+ * and failing if a file or something else was at the end of that
+ * path
+ * @param dir dir to guarantee the existence of
+ * @throws IOException IO problems, or path exists but is not a dir
+ */
+ private void mkdirStrict(File dir) throws IOException {
+ if (!dir.mkdirs()) {
+ if (!dir.isDirectory()) {
+ throw new IOException("Failed to mkdir " + dir);
+ }
+ }
+ }
+
+ /**
+ * Append a formatted string to the diagnostics.
+ *
+ * A newline is appended afterwards.
+ * @param text text including any format commands
+   * @param args arguments for the format operation.
+ */
+ protected void addDiagnostics(String text, Object ... args) {
+ diagnostics.append(String.format(text, args)).append('\n');
+ }
+
+ /**
+ * Get the diagnostics info
+ * @return the diagnostics string built up
+ */
+ public String getDiagnostics() {
+ return diagnostics.toString();
+ }
+
+ /**
+   * Set up security. This must be done prior to creating
+ * the ZK instance, as it sets up JAAS if that has not been done already.
+ *
+ * @return true if the cluster has security enabled.
+ */
+ public boolean setupSecurity() throws IOException {
+ Configuration conf = getConfig();
+ String jaasContext = conf.getTrimmed(KEY_REGISTRY_ZKSERVICE_JAAS_CONTEXT);
+ secureServer = StringUtils.isNotEmpty(jaasContext);
+ if (secureServer) {
+ RegistrySecurity.validateContext(jaasContext);
+ RegistrySecurity.bindZKToServerJAASContext(jaasContext);
+ // policy on failed auth
+ System.setProperty(PROP_ZK_ALLOW_FAILED_SASL_CLIENTS,
+ conf.get(KEY_ZKSERVICE_ALLOW_FAILED_SASL_CLIENTS,
+ "true"));
+
+ //needed so that you can use sasl: strings in the registry
+ System.setProperty(RegistryInternalConstants.ZOOKEEPER_AUTH_PROVIDER +".1",
+ RegistryInternalConstants.SASLAUTHENTICATION_PROVIDER);
+ String serverContext =
+ System.getProperty(PROP_ZK_SERVER_SASL_CONTEXT);
+ addDiagnostics("Server JAAS context s = %s", serverContext);
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ /**
+ * Startup: start ZK. It is only after this that
+ * the binding information is valid.
+ * @throws Exception
+ */
+ @Override
+ protected void serviceStart() throws Exception {
+
+ setupSecurity();
+
+ ZooKeeperServer zkServer = new ZooKeeperServer();
+ FileTxnSnapLog ftxn = new FileTxnSnapLog(dataDir, dataDir);
+ zkServer.setTxnLogFactory(ftxn);
+ zkServer.setTickTime(tickTime);
+
+ LOG.info("Starting Local Zookeeper service");
+ factory = ServerCnxnFactory.createFactory();
+ factory.configure(getAddress(port), -1);
+ factory.startup(zkServer);
+
+ String connectString = getConnectionString();
+ LOG.info("In memory ZK started at {}\n", connectString);
+
+ if (LOG.isDebugEnabled()) {
+ StringWriter sw = new StringWriter();
+ PrintWriter pw = new PrintWriter(sw);
+ zkServer.dumpConf(pw);
+ pw.flush();
+ LOG.debug(sw.toString());
+ }
+ binding = new BindingInformation();
+ binding.ensembleProvider = new FixedEnsembleProvider(connectString);
+ binding.description =
+ getName() + " reachable at \"" + connectString + "\"";
+
+ addDiagnostics(binding.description);
+ // finally: set the binding information in the config
+ getConfig().set(KEY_REGISTRY_ZK_QUORUM, connectString);
+ }
+
+ /**
+ * When the service is stopped, it deletes the data directory
+ * and its contents
+ * @throws Exception
+ */
+ @Override
+ protected void serviceStop() throws Exception {
+ if (factory != null) {
+ factory.shutdown();
+ factory = null;
+ }
+ if (dataDir != null) {
+ FileUtil.fullyDelete(dataDir);
+ }
+ }
+
+ @Override
+ public BindingInformation supplyBindingInformation() {
+ Preconditions.checkNotNull(binding,
+ "Service is not started: binding information undefined");
+ return binding;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperServiceKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperServiceKeys.java
new file mode 100644
index 0000000000..f4f4976c7b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperServiceKeys.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.server.services;
+
+import org.apache.hadoop.registry.client.api.RegistryConstants;
+
+/**
+ * Service keys for configuring the {@link MicroZookeeperService}.
+ * These are not used in registry clients or the RM-side service,
+ * so are kept separate.
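+ *
+ * A sketch of configuring the service before init() (the values are
+ * illustrative):
+ * <pre>
+ * Configuration conf = new Configuration();
+ * conf.set(MicroZookeeperServiceKeys.KEY_ZKSERVICE_DIR, "/tmp/zk");
+ * conf.setInt(MicroZookeeperServiceKeys.KEY_ZKSERVICE_PORT, 0); // any free port
+ * </pre>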
+ */
+public interface MicroZookeeperServiceKeys {
+ public static final String ZKSERVICE_PREFIX =
+ RegistryConstants.REGISTRY_PREFIX + "zk.service.";
+ /**
+ * Key to define the JAAS context for the ZK service: {@value}.
+ */
+ public static final String KEY_REGISTRY_ZKSERVICE_JAAS_CONTEXT =
+ ZKSERVICE_PREFIX + "service.jaas.context";
+
+ /**
+   * ZK server tick time: {@value}.
+ */
+ public static final String KEY_ZKSERVICE_TICK_TIME =
+ ZKSERVICE_PREFIX + "ticktime";
+
+ /**
+ * host to register on: {@value}.
+ */
+ public static final String KEY_ZKSERVICE_HOST = ZKSERVICE_PREFIX + "host";
+ /**
+   * Default host to serve on; this is localhost as it
+ * is the only one guaranteed to be available: {@value}.
+ */
+ public static final String DEFAULT_ZKSERVICE_HOST = "localhost";
+ /**
+ * port; 0 or below means "any": {@value}
+ */
+ public static final String KEY_ZKSERVICE_PORT = ZKSERVICE_PREFIX + "port";
+
+ /**
+ * Directory containing data: {@value}
+ */
+ public static final String KEY_ZKSERVICE_DIR = ZKSERVICE_PREFIX + "dir";
+
+ /**
+ * Should failed SASL clients be allowed: {@value}?
+ *
+ * Default is the ZK default: true
+ */
+ public static final String KEY_ZKSERVICE_ALLOW_FAILED_SASL_CLIENTS =
+ ZKSERVICE_PREFIX + "allow.failed.sasl.clients";
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java
new file mode 100644
index 0000000000..693bb0b911
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java
@@ -0,0 +1,529 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.server.services;
+
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.lang.StringUtils;
+import org.apache.curator.framework.api.BackgroundCallback;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.service.ServiceStateException;
+import org.apache.hadoop.registry.client.binding.RegistryUtils;
+import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
+import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
+import org.apache.hadoop.registry.client.exceptions.NoPathPermissionsException;
+import org.apache.hadoop.registry.client.exceptions.NoRecordException;
+import org.apache.hadoop.registry.client.impl.zk.RegistryBindingSource;
+import org.apache.hadoop.registry.client.impl.zk.RegistryOperationsService;
+import org.apache.hadoop.registry.client.impl.zk.RegistrySecurity;
+import org.apache.hadoop.registry.client.types.RegistryPathStatus;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.data.ACL;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Administrator service for the registry. This is the one with
+ * permissions to create the base directories and those for users.
+ *
+ * It also includes support for asynchronous operations, so that
+ * zookeeper connectivity problems do not hold up the server code
+ * performing the actions.
+ *
+ * Any action queued via {@link #submit(Callable)} will be
+ * run asynchronously. The {@link #createDirAsync(String, List, boolean)}
+ * method is an example of such an action.
+ *
+ * A key async action is the depth-first tree purge, which supports
+ * pluggable policies for deleting entries. The method
+ * {@link #purge(String, NodeSelector, PurgePolicy, BackgroundCallback)}
+ * implements the recursive purge operation; the inner class
+ * {@link AsyncPurge} provides the asynchronous scheduling of this.
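+ *
+ * A sketch of scheduling an asynchronous purge (the admin instance,
+ * selector and callback are illustrative):
+ * <pre>
+ * RegistryAdminService admin = ...;
+ * Future<Integer> f =
+ *     admin.submit(admin.new AsyncPurge("/users/example",
+ *         selector, PurgePolicy.SkipOnChildren, callback));
+ * int deleteOps = f.get();
+ * </pre>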
+ */
+public class RegistryAdminService extends RegistryOperationsService {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(RegistryAdminService.class);
+ /**
+ * The ACL permissions for the user's homedir ACL.
+ */
+ public static final int USER_HOMEDIR_ACL_PERMISSIONS =
+ ZooDefs.Perms.READ | ZooDefs.Perms.WRITE
+ | ZooDefs.Perms.CREATE | ZooDefs.Perms.DELETE;
+
+ /**
+ * Executor for async operations
+ */
+ protected final ExecutorService executor;
+
+ /**
+ * Construct an instance of the service
+ * @param name service name
+ */
+ public RegistryAdminService(String name) {
+ this(name, null);
+ }
+
+ /**
+ * construct an instance of the service, using the
+ * specified binding source to bond to ZK
+ * @param name service name
+ * @param bindingSource provider of ZK binding information
+ */
+ public RegistryAdminService(String name,
+ RegistryBindingSource bindingSource) {
+ super(name, bindingSource);
+ executor = Executors.newCachedThreadPool(
+ new ThreadFactory() {
+ private AtomicInteger counter = new AtomicInteger(1);
+
+ @Override
+ public Thread newThread(Runnable r) {
+ return new Thread(r,
+ "RegistryAdminService " + counter.getAndIncrement());
+ }
+ });
+ }
+
+ /**
+ * Stop the service: halt the executor.
+ * @throws Exception exception.
+ */
+ @Override
+ protected void serviceStop() throws Exception {
+ stopExecutor();
+ super.serviceStop();
+ }
+
+ /**
+ * Stop the executor if it is not null.
+ * This uses {@link ExecutorService#shutdownNow()}
+ * and so does not block until they have completed.
+ */
+ protected synchronized void stopExecutor() {
+ if (executor != null) {
+ executor.shutdownNow();
+ }
+ }
+
+ /**
+ * Get the executor
+ * @return the executor
+ */
+ protected ExecutorService getExecutor() {
+ return executor;
+ }
+
+ /**
+ * Submit a callable
+ * @param callable callable
+   * @param <V> type of the final get
+   * @return a future to wait on
+   */
+  public <V> Future<V> submit(Callable<V> callable) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Submitting {}", callable);
+ }
+ return getExecutor().submit(callable);
+ }
+
+ /**
+ * Asynchronous operation to create a directory
+ * @param path path
+ * @param acls ACL list
+ * @param createParents flag to indicate parent dirs should be created
+ * as needed
+ * @return the future which will indicate whether or not the operation
+ * succeeded —and propagate any exceptions
+ * @throws IOException
+ */
+  public Future<Boolean> createDirAsync(final String path,
+      final List<ACL> acls,
+      final boolean createParents) throws IOException {
+    return submit(new Callable<Boolean>() {
+ @Override
+ public Boolean call() throws Exception {
+ return maybeCreate(path, CreateMode.PERSISTENT,
+ acls, createParents);
+ }
+ });
+ }
+
+ /**
+ * Init operation sets up the system ACLs.
+ * @param conf configuration of the service
+ * @throws Exception
+ */
+ @Override
+ protected void serviceInit(Configuration conf) throws Exception {
+ super.serviceInit(conf);
+ RegistrySecurity registrySecurity = getRegistrySecurity();
+ if (registrySecurity.isSecureRegistry()) {
+ ACL sasl = registrySecurity.createSaslACLFromCurrentUser(ZooDefs.Perms.ALL);
+ registrySecurity.addSystemACL(sasl);
+ LOG.info("Registry System ACLs:",
+ RegistrySecurity.aclsToString(
+ registrySecurity.getSystemACLs()));
+ }
+ }
+
+ /**
+ * Start the service, including creating base directories with permissions
+ * @throws Exception
+ */
+ @Override
+ protected void serviceStart() throws Exception {
+ super.serviceStart();
+ // create the root directories
+ try {
+ createRootRegistryPaths();
+ } catch (NoPathPermissionsException e) {
+
+ String message = String.format(Locale.ENGLISH,
+ "Failed to create root paths {%s};" +
+ "\ndiagnostics={%s}" +
+ "\ncurrent registry is:" +
+ "\n{%s}",
+ e,
+ bindingDiagnosticDetails(),
+ dumpRegistryRobustly(true));
+
+ LOG.error(" Failure {}", e, e);
+ LOG.error(message);
+
+ // TODO: this is something temporary to deal with the problem
+ // that jenkins is failing this test
+ throw new NoPathPermissionsException(e.getPath().toString(), message, e);
+ }
+ }
+
+ /**
+ * Create the initial registry paths
+ * @throws IOException any failure
+ */
+ @VisibleForTesting
+ public void createRootRegistryPaths() throws IOException {
+
+    List<ACL> systemACLs = getRegistrySecurity().getSystemACLs();
+ LOG.info("System ACLs {}",
+ RegistrySecurity.aclsToString(systemACLs));
+ maybeCreate("", CreateMode.PERSISTENT, systemACLs, false);
+ maybeCreate(PATH_USERS, CreateMode.PERSISTENT,
+ systemACLs, false);
+ maybeCreate(PATH_SYSTEM_SERVICES,
+ CreateMode.PERSISTENT,
+ systemACLs, false);
+ }
+
+ /**
+ * Get the path to a user's home dir
+ * @param username username
+ * @return a path for services underneath
+ */
+ protected String homeDir(String username) {
+ return RegistryUtils.homePathForUser(username);
+ }
+
+ /**
+ * Set up the ACL for the user.
+ * Important: this must run client-side as it needs
+ * to know the id:pass tuple for a user
+ * @param username user name
+ * @param perms permissions
+ * @return an ACL list
+ * @throws IOException ACL creation/parsing problems
+ */
+  public List<ACL> aclsForUser(String username, int perms) throws IOException {
+    List<ACL> clientACLs = getClientAcls();
+ RegistrySecurity security = getRegistrySecurity();
+ if (security.isSecureRegistry()) {
+ clientACLs.add(security.createACLfromUsername(username, perms));
+ }
+ return clientACLs;
+ }
+
+ /**
+ * Start an async operation to create the home path for a user
+   * if it does not exist.
+   * @param shortname username, without any @REALM in kerberos
+   * @return a future for the creation operation, or null if the home
+   * path already exists
+ * @throws IOException any failure while setting up the operation
+ *
+ */
+  public Future<Boolean> initUserRegistryAsync(final String shortname)
+ throws IOException {
+
+ String homeDir = homeDir(shortname);
+ if (!exists(homeDir)) {
+      // create the directory: the user's home path does not yet exist
+ return createDirAsync(homeDir,
+ aclsForUser(shortname,
+ USER_HOMEDIR_ACL_PERMISSIONS),
+ false);
+ }
+ return null;
+ }
+
+ /**
+ * Create the home path for a user if it does not exist.
+ *
+ * This uses {@link #initUserRegistryAsync(String)} and then waits for the
+   * result. The code path is the same as the async operation; this just
+   * picks up and relays/converts exceptions.
+ * @param username username
+ * @return the path created
+ * @throws IOException any failure
+ *
+ */
+ public String initUserRegistry(final String username)
+ throws IOException {
+
+ try {
+      Future<Boolean> future = initUserRegistryAsync(username);
+      if (future != null) {
+        // wait only if a creation operation was actually scheduled
+        future.get();
+      }
+ } catch (InterruptedException e) {
+ throw (InterruptedIOException)
+ (new InterruptedIOException(e.toString()).initCause(e));
+ } catch (ExecutionException e) {
+ Throwable cause = e.getCause();
+ if (cause instanceof IOException) {
+ throw (IOException) (cause);
+ } else {
+ throw new IOException(cause.toString(), cause);
+ }
+ }
+
+ return homeDir(username);
+ }
+
+ /**
+   * Validate the kerberos realm.
+   * <ul>
+   *   <li>Insecure: not needed.</li>
+   *   <li>Secure: must have been determined.</li>
+   * </ul>
+ */
+ protected void verifyRealmValidity() throws ServiceStateException {
+ if (isSecure()) {
+ String realm = getRegistrySecurity().getKerberosRealm();
+ if (StringUtils.isEmpty(realm)) {
+ throw new ServiceStateException("Cannot determine service realm");
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Started Registry operations in realm {}", realm);
+ }
+ }
+ }
+
+ /**
+ * Policy to purge entries
+ */
+ public enum PurgePolicy {
+ PurgeAll,
+ FailOnChildren,
+ SkipOnChildren
+ }
+
+ /**
+ * Recursive operation to purge all matching records under a base path.
+   * <ol>
+   *   <li>Uses a depth first search.</li>
+   *   <li>A match is on ID and persistence policy, or, if policy==-1,
+   *   any match.</li>
+   *   <li>If a record matches then it is deleted without any child
+   *   searches.</li>
+   *   <li>Deletions will be asynchronous if a callback is provided.</li>
+   * </ol>
+   *
+   * The code is designed to be robust against parallel deletions taking
+   * place; in such a case it will stop attempting that part of the tree.
+   * This avoids the situation of more than one purge happening in parallel,
+   * with one of the purge operations deleting the node tree above the other.
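+   *
+   * For example, a sketch of purging everything under a path (the path
+   * and the always-true selector are illustrative; the null callback
+   * makes the deletions synchronous):
+   * <pre>
+   * int ops = purge("/users/example",
+   *     new NodeSelector() {
+   *       public boolean shouldSelect(String path,
+   *           RegistryPathStatus status,
+   *           ServiceRecord record) {
+   *         return true;
+   *       }
+   *     },
+   *     PurgePolicy.PurgeAll,
+   *     null);
+   * </pre>
+   *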
+ * @param path base path
+ * @param selector selector for the purge policy
+ * @param purgePolicy what to do if there is a matching record with children
+ * @param callback optional curator callback
+   * @return the number of delete operations performed. As deletes may be for
+ * everything under a path, this may be less than the number of records
+ * actually deleted
+ * @throws IOException problems
+ * @throws PathIsNotEmptyDirectoryException if an entry cannot be deleted
+ * as it has children and the purge policy is FailOnChildren
+ */
+ @VisibleForTesting
+ public int purge(String path,
+ NodeSelector selector,
+ PurgePolicy purgePolicy,
+ BackgroundCallback callback) throws IOException {
+
+
+ boolean toDelete = false;
+ // look at self to see if it has a service record
+    Map<String, RegistryPathStatus> childEntries;
+    Collection<RegistryPathStatus> entries;
+ try {
+ // list this path's children
+ childEntries = RegistryUtils.statChildren(this, path);
+ entries = childEntries.values();
+ } catch (PathNotFoundException e) {
+ // there's no record here, it may have been deleted already.
+ // exit
+ return 0;
+ }
+
+ try {
+ RegistryPathStatus registryPathStatus = stat(path);
+ ServiceRecord serviceRecord = resolve(path);
+      // a record exists here: check it against the selector
+ toDelete = selector.shouldSelect(path, registryPathStatus, serviceRecord);
+ } catch (EOFException ignored) {
+ // ignore
+ } catch (InvalidRecordException ignored) {
+ // ignore
+ } catch (NoRecordException ignored) {
+ // ignore
+ } catch (PathNotFoundException e) {
+ // there's no record here, it may have been deleted already.
+ // exit
+ return 0;
+ }
+
+ if (toDelete && !entries.isEmpty()) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Match on record @ {} with children ", path);
+ }
+ // there's children
+ switch (purgePolicy) {
+ case SkipOnChildren:
+ // don't do the deletion... continue to next record
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Skipping deletion");
+ }
+ toDelete = false;
+ break;
+ case PurgeAll:
+ // mark for deletion
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Scheduling for deletion with children");
+ }
+ toDelete = true;
+        entries = new ArrayList<RegistryPathStatus>(0);
+ break;
+ case FailOnChildren:
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Failing deletion operation");
+ }
+ throw new PathIsNotEmptyDirectoryException(path);
+ }
+ }
+
+ int deleteOps = 0;
+ if (toDelete) {
+ try {
+ zkDelete(path, true, callback);
+ } catch (PathNotFoundException e) {
+ // sign that the path was deleted during the operation.
+ // this is a no-op, and all children can be skipped
+ return deleteOps;
+ }
+ deleteOps++;
+ }
+
+ // now go through the children
+ for (RegistryPathStatus status : entries) {
+ String childname = status.path;
+ String childpath = RegistryPathUtils.join(path, childname);
+ deleteOps += purge(childpath,
+ selector,
+ purgePolicy,
+ callback);
+ }
+
+ return deleteOps;
+ }
+
+ /**
+   * Selector predicate used by the purge logic to decide what to delete.
+ */
+ public interface NodeSelector {
+
+ boolean shouldSelect(String path,
+ RegistryPathStatus registryPathStatus,
+ ServiceRecord serviceRecord);
+ }
+
+ /**
+ * An async registry purge action taking
+ * a selector which decides what to delete
+ */
+  public class AsyncPurge implements Callable<Integer> {
+
+ private final BackgroundCallback callback;
+ private final NodeSelector selector;
+ private final String path;
+ private final PurgePolicy purgePolicy;
+
+ public AsyncPurge(String path,
+ NodeSelector selector,
+ PurgePolicy purgePolicy,
+ BackgroundCallback callback) {
+ this.callback = callback;
+ this.selector = selector;
+ this.path = path;
+ this.purgePolicy = purgePolicy;
+ }
+
+ @Override
+ public Integer call() throws Exception {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Executing {}", this);
+ }
+ return purge(path,
+ selector,
+ purgePolicy,
+ callback);
+ }
+
+ @Override
+ public String toString() {
+ return String.format(
+ "Record purge under %s with selector %s",
+ path, selector);
+ }
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/package-info.java
new file mode 100644
index 0000000000..85d24b3a02
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/package-info.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Basic services for the YARN registry
+ * <ul>
+ *   <li>The {@link org.apache.hadoop.registry.server.services.RegistryAdminService}
+ *   extends the shared YARN Registry client with registry setup and
+ *   (potentially asynchronous) administrative actions.</li>
+ *
+ *   <li>The {@link org.apache.hadoop.registry.server.services.MicroZookeeperService}
+ *   is a transient Zookeeper instance bound to the YARN service lifecycle.
+ *   It is suitable for testing.</li>
+ *
+ *   <li>The {@link org.apache.hadoop.registry.server.services.AddingCompositeService}
+ *   extends the standard YARN composite service by making its add and remove
+ *   methods public. It is a utility service used in parts of the codebase.</li>
+ * </ul>
+ */
+package org.apache.hadoop.registry.server.services;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/resources/.keep b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/resources/.keep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.tla b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.tla
new file mode 100644
index 0000000000..1c19adead4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.tla
@@ -0,0 +1,538 @@
+---------------------------- MODULE yarnregistry ----------------------------
+
+EXTENDS FiniteSets, Sequences, Naturals, TLC
+
+
+(*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *)
+
+(*
+
+============================================================================
+
+This defines the YARN registry in terms of operations on sets of records.
+
+Every registry entry is represented as a record containing both the path and the data.
+
+It assumes that
+
+1. operations on this set are immediate.
+2. selection operations (such as \A and \E) are atomic.
+3. changes are immediately visible to all other users of the registry.
+4. This clearly implies that changes are visible in the sequence in which they happen.
+
+A multi-server Zookeeper-based registry may not meet all those assumptions
+
+1. changes may take time to propagate across the ZK quorum, hence changes cannot
+be considered immediate from the perspective of other registry clients.
+(assumptions (1) and (3)).
+
+2. Selection operations may not be atomic. (assumption (2)).
+
+Operations will still happen in the order received by the elected ZK master
+
+A stricter definition would try to state that all operations are eventually
+true excluding other changes happening during a sequence of actions.
+This is left as an exercise for the reader.
+
+The specification also omits all coverage of the permissions policy.
+*)
+
+
+
+CONSTANTS
+ PathChars, \* the set of valid characters in a path
+ Paths, \* the set of all possible valid paths
+ Data, \* the set of all possible sequences of bytes
+ Address, \* the set of all possible address n-tuples
+ Addresses, \* the set of all possible address instances
+    Endpoints,      \* the set of all possible endpoints
+ PersistPolicies,\* the set of persistence policies
+ ServiceRecords, \* all service records
+    Registries,     \* the set of all possible registries
+ BindActions, \* all possible put actions
+ DeleteActions, \* all possible delete actions
+ PurgeActions, \* all possible purge actions
+ MknodeActions \* all possible mkdir actions
+
+
+
+(* the registry*)
+VARIABLE registry
+
+(* Sequence of actions to apply to the registry *)
+VARIABLE actions
+
+----------------------------------------------------------------------------------------
+(* Tuple of all variables. *)
+
+
+vars == << registry, actions >>
+
+
+----------------------------------------------------------------------------------------
+
+
+
+
+(* Persistence policy *)
+PersistPolicySet == {
+ "", \* Undefined; field not present. PERMANENT is implied.
+ "permanent", \* persists until explicitly removed
+ "application", \* persists until the application finishes
+ "application-attempt", \* persists until the application attempt finishes
+ "container" \* persists until the container finishes
+ }
+
+(* Type invariants. *)
+TypeInvariant ==
+ /\ \A p \in PersistPolicies: p \in PersistPolicySet
+
+
+
+----------------------------------------------------------------------------------------
+
+
+
+(*
+
+An Entry is defined as a path, and the actual
+data which it contains.
+
+By including the path in an entry, we avoid having to define some
+function mapping Path -> entry. Instead a registry can be defined as a
+set of RegistryEntries matching the validity critera.
+
+*)
+
+RegistryEntry == [
+ \* The path to the entry
+ path: Paths,
+
+ \* the data in the entry
+ data: Data
+ ]
+
+
+(*
+ An endpoint in a service record
+*)
+Endpoint == [
+ \* API of the endpoint: some identifier
+ api: STRING,
+
+ \* A list of address n-tuples
+ addresses: Addresses
+]
+
+(* Attributes are the set of all string to string mappings *)
+
+Attributes == [
+    STRING -> STRING
+]
+
+(*
+ A service record
+*)
+ServiceRecord == [
+ \* ID -used when applying the persistence policy
+ yarn_id: STRING,
+
+ \* the persistence policy
+ yarn_persistence: PersistPolicySet,
+
+ \*A description
+ description: STRING,
+
+ \* A set of endpoints
+ external: Endpoints,
+
+ \* Endpoints intended for use internally
+ internal: Endpoints,
+
+ \* Attributes are a function
+ attributes: Attributes
+]
+
+
+----------------------------------------------------------------------------------------
+
+(* Action Records *)
+
+putAction == [
+    type: "bind",
+    path: STRING,
+    record: ServiceRecord
+]
+
+deleteAction == [
+ type: "delete",
+ path: STRING,
+ recursive: BOOLEAN
+]
+
+purgeAction == [
+    type: "purge",
+    path: STRING,
+    id: STRING,
+    persistence: PersistPolicySet
+]
+
+mkNodeAction == [
+ type: "mknode",
+ path: STRING,
+    recursive: BOOLEAN
+]
+
+
+----------------------------------------------------------------------------------------
+
+(*
+
+ Path operations
+
+*)
+
+(*
+Parent is defined for non-empty sequences
+ *)
+
+parent(path) == SubSeq(path, 1, Len(path)-1)
+
+isParent(path, c) == path = parent(c)
+
+----------------------------------------------------------------------------------------
+(*
+Registry Access Operations
+*)
+
+(*
+Lookup all entries in a registry with a matching path
+*)
+
+resolve(Registry, path) == {entry \in Registry: entry.path = path}
+
+(*
+A path exists in the registry iff there is an entry with that path
+*)
+
+exists(Registry, path) == resolve(Registry, path) /= {}
+
+(*
+A parent entry, or an empty set if there is none
+*)
+parentEntry(Registry, path) == resolve(Registry, parent(path))
+
+(*
+A root path is the empty sequence
+*)
+isRootPath(path) == path = <<>>
+
+(*
+The root entry is the entry whose path is the root path
+*)
+isRootEntry(entry) == entry.path = <<>>
+
+
+(*
+A path p is an ancestor of another path d if they are different, and the path d
+starts with path p
+*)
+
+isAncestorOf(path, d) ==
+ /\ path /= d
+    /\ \E k : SubSeq(d, 1, k) = path
+
+
+ancestorPathOf(path) ==
+    {a \in Paths: isAncestorOf(a, path)}
+
+(*
+The set of all children of a path in the registry
+*)
+
+children(R, path) == {c \in R: isParent(path, c.path)}
+
+(*
+A path has children if the children() function does not return the empty set
+*)
+hasChildren(R, path) == children(R, path) /= {}
+
+(*
+Descendant: a child of a path or a descendant of a child of a path
+*)
+
+descendants(R, path) == {e \in R: isAncestorOf(path, e.path)}
+
+(*
+Ancestors: all entries in the registry whose path is an entry of the path argument
+*)
+ancestors(R, path) == {e \in R: isAncestorOf(e.path, path)}
+
+(*
+The set of entries that are a path and its descendants
+*)
+pathAndDescendants(R, path) ==
+    resolve(R, path) \union descendants(R, path)
+
+
+(*
+For validity, all entries must match the following criteria
+ *)
+
+validRegistry(R) ==
+ \* there can be at most one entry for a path.
+ /\ \A e \in R: Cardinality(resolve(R, e.path)) = 1
+
+ \* There's at least one root entry
+ /\ \E e \in R: isRootEntry(e)
+
+ \* an entry must be the root entry or have a parent entry
+ /\ \A e \in R: isRootEntry(e) \/ exists(R, parent(e.path))
+
+ \* If the entry has data, it must be a service record
+ /\ \A e \in R: (e.data = << >> \/ e.data \in ServiceRecords)
+
+
+----------------------------------------------------------------------------------------
+(*
+Registry Manipulation
+*)
+
+(*
+An entry can be put into the registry iff
+its parent is present or it is the root entry
+*)
+canBind(R, e) ==
+ isRootEntry(e) \/ exists(R, parent(e.path))
+
+(*
+'bind() adds/replaces an entry if permitted
+*)
+
+bind(R, e) ==
+ /\ canBind(R, e)
+ /\ R' = (R \ resolve(R, e.path)) \union {e}
+
+
+(*
+mknode() adds a new empty entry where there was none before, iff
+-the parent exists
+-it meets the requirement for being "bindable"
+*)
+
+mknodeSimple(R, path) ==
+ LET record == [ path |-> path, data |-> <<>> ]
+ IN \/ exists(R, path)
+ \/ (exists(R, parent(path)) /\ canBind(R, record) /\ (R' = R \union {record} ))
+
+
+(*
+For all parents, the mknodeSimple() criteria must apply.
+This could be defined recursively, though as TLA+ does not support recursion,
+an alternative is required.
+
+
+Because this specification declares the final state of an operation, not
+its implementation, all that is needed is to describe those parents.
+
+It declares that the mknodeSimple state applies to the path and all its
+parents in the set R'
+
+*)
+mknodeWithParents(R, path) ==
+ /\ \A p2 \in ancestors(R, path) : mknodeSimple(R, p2)
+ /\ mknodeSimple(R, path)
+
+
+mknode(R, path, recursive) ==
+ IF recursive THEN mknodeWithParents(R, path) ELSE mknodeSimple(R, path)
+
+(*
+Deletion is set difference on any existing entries
+*)
+
+simpleDelete(R, path) ==
+ /\ ~isRootPath(path)
+ /\ children(R, path) = {}
+ /\ R' = R \ resolve(R, path)
+
+(*
+Recursive delete: neither the path nor its descendants exist in the new registry
+*)
+
+recursiveDelete(R, path) ==
+ \* Root path: the new registry is the initial registry again
+ /\ isRootPath(path) => R' = { [ path |-> <<>>, data |-> <<>> ] }
+    \* Any other path: the new registry has the entry for that path
+    \* and all of its descendants removed
+ /\ ~isRootPath(path) => R' = R \ ( resolve(R, path) \union descendants(R, path))
+
+
+(*
+Delete operation which chooses the recursiveness policy based on an argument
+*)
+
+delete(R, path, recursive) ==
+ IF recursive THEN recursiveDelete(R, path) ELSE simpleDelete(R, path)
+
+
+(*
+Purge ensures that all entries under a path with the matching ID and policy are not there
+afterwards
+*)
+
+purge(R, path, id, persistence) ==
+ /\ (persistence \in PersistPolicySet)
+ /\ \A p2 \in pathAndDescendants(R, path) :
+ (p2.attributes["yarn:id"] = id /\ p2.attributes["yarn:persistence"] = persistence)
+ => recursiveDelete(R, p2.path)
+
+(*
+resolveRecord() resolves the record at a path or fails.
+
+It relies on the fact that if the cardinality of a set is 1, then the CHOOSE operator
+is guaranteed to return the single entry of that set, iff the choice predicate holds.
+
+Using a predicate of TRUE, it always succeeds, so this function selects
+the sole entry of the resolve operation.
+*)
+
+resolveRecord(R, path) ==
+ LET l == resolve(R, path) IN
+ /\ Cardinality(l) = 1
+ /\ CHOOSE e \in l : TRUE
+
+(*
+The specific action of putting an entry into a record includes validating the record
+*)
+
+validRecordToBind(path, record) ==
+ \* The root entry must have permanent persistence
+ isRootPath(path) => (record.attributes["yarn:persistence"] = "permanent"
+ \/ record.attributes["yarn:persistence"] = "")
+
+
+(*
+Binding a service record involves validating it then putting it in the registry
+marshalled as the data in the entry
+ *)
+bindRecord(R, path, record) ==
+ /\ validRecordToBind(path, record)
+ /\ bind(R, [path |-> path, data |-> record])
+
+
+----------------------------------------------------------------------------------------
+
+
+
+(*
+The action queue can only contain one of the sets of action types, and
+by giving each a unique name, those sets are guaranteed to be disjoint
+*)
+ QueueInvariant ==
+ /\ \A a \in actions:
+ \/ (a \in BindActions /\ a.type="bind")
+ \/ (a \in DeleteActions /\ a.type="delete")
+ \/ (a \in PurgeActions /\ a.type="purge")
+ \/ (a \in MknodeActions /\ a.type="mknode")
+
+
+(*
+Applying queued actions
+*)
+
+applyAction(R, a) ==
+ \/ (a \in BindActions /\ bindRecord(R, a.path, a.record) )
+ \/ (a \in MknodeActions /\ mknode(R, a.path, a.recursive) )
+ \/ (a \in DeleteActions /\ delete(R, a.path, a.recursive) )
+ \/ (a \in PurgeActions /\ purge(R, a.path, a.id, a.persistence))
+
+
+(*
+Apply the first action in a list and then update the actions
+*)
+applyFirstAction(R, a) ==
+ /\ actions /= <<>>
+ /\ applyAction(R, Head(a))
+ /\ actions' = Tail(a)
+
+
+Next == applyFirstAction(registry, actions)
+
+(*
+All submitted actions must eventually be applied.
+*)
+
+
+Liveness == <>( actions = <<>> )
+
+
+(*
+The initial state of a registry has the root entry.
+*)
+
+InitialRegistry == registry = {
+ [ path |-> <<>>, data |-> <<>> ]
+}
+
+
+(*
+The valid state of the "registry" variable is defined as
+Via the validRegistry predicate
+*)
+
+ValidRegistryState == validRegistry(registry)
+
+
+
+(*
+The initial state of the system
+*)
+InitialState ==
+ /\ InitialRegistry
+ /\ ValidRegistryState
+ /\ actions = <<>>
+
+
+(*
+The registry has an initial state, the series of state changes driven by the actions,
+and the requirement that it does act on those actions.
+*)
+RegistrySpec ==
+ /\ InitialState
+ /\ [][Next]_vars
+ /\ Liveness
+
+
+----------------------------------------------------------------------------------------
+
+(*
+Theorem: For all operations from that initial state, the registry state is still valid
+*)
+THEOREM InitialState => [] ValidRegistryState
+
+(*
+Theorem: for all operations from that initial state, the type invariants hold
+*)
+THEOREM InitialState => [] TypeInvariant
+
+(*
+Theorem: the queue invariants hold
+*)
+THEOREM InitialState => [] QueueInvariant
+
+=============================================================================
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/AbstractRegistryTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/AbstractRegistryTest.java
new file mode 100644
index 0000000000..5b34f6032e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/AbstractRegistryTest.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry;
+
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.registry.client.api.RegistryOperations;
+import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
+import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.hadoop.registry.server.integration.RMRegistryOperationsService;
+import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+
+/**
+ * Abstract base class for registry tests: initializes the field
+ * {@link #registry} before each test with an instance of
+ * {@link RMRegistryOperationsService}, and {@link #operations} with the
+ * same instance cast to the type {@link RegistryOperations}.
+ *
+ */
+public class AbstractRegistryTest extends AbstractZKRegistryTest {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(AbstractRegistryTest.class);
+ protected RMRegistryOperationsService registry;
+ protected RegistryOperations operations;
+
+ @Before
+ public void setupRegistry() throws IOException {
+ registry = new RMRegistryOperationsService("yarnRegistry");
+ operations = registry;
+ registry.init(createRegistryConfiguration());
+ registry.start();
+ operations.delete("/", true);
+ registry.createRootRegistryPaths();
+ addToTeardown(registry);
+ }
+
+ /**
+ * Create a service entry with the sample endpoints, and put it
+ * at the destination
+ * @param path path
+ * @param createFlags flags
+ * @return the record
+ * @throws IOException on a failure
+ */
+ protected ServiceRecord putExampleServiceEntry(String path, int createFlags) throws
+ IOException,
+ URISyntaxException {
+ return putExampleServiceEntry(path, createFlags, PersistencePolicies.PERMANENT);
+ }
+
+ /**
+ * Create a service entry with the sample endpoints, and put it
+ * at the destination
+ * @param path path
+   * @param createFlags flags
+   * @param persistence persistence policy to use in the record
+   * @return the record
+ * @throws IOException on a failure
+ */
+ protected ServiceRecord putExampleServiceEntry(String path,
+ int createFlags,
+ String persistence)
+ throws IOException, URISyntaxException {
+ ServiceRecord record = buildExampleServiceEntry(persistence);
+
+ registry.mknode(RegistryPathUtils.parentOf(path), true);
+ operations.bind(path, record, createFlags);
+ return record;
+ }
+
+ /**
+ * Assert a path exists
+ * @param path path in the registry
+ * @throws IOException
+ */
+ public void assertPathExists(String path) throws IOException {
+ operations.stat(path);
+ }
+
+ /**
+   * Assert that a path does not exist.
+ * @param path path in the registry
+ * @throws IOException
+ */
+ public void assertPathNotFound(String path) throws IOException {
+ try {
+ operations.stat(path);
+ fail("Path unexpectedly found: " + path);
+ } catch (PathNotFoundException e) {
+      // expected: the path is absent
+ }
+ }
+
+ /**
+ * Assert that a path resolves to a service record
+ * @param path path in the registry
+ * @throws IOException
+ */
+ public void assertResolves(String path) throws IOException {
+ operations.resolve(path);
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/AbstractZKRegistryTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/AbstractZKRegistryTest.java
new file mode 100644
index 0000000000..bcff6222f9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/AbstractZKRegistryTest.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.Service;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.registry.client.api.RegistryConstants;
+import org.apache.hadoop.registry.server.services.AddingCompositeService;
+import org.apache.hadoop.registry.server.services.MicroZookeeperService;
+import org.apache.hadoop.registry.server.services.MicroZookeeperServiceKeys;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.rules.TestName;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+
+public class AbstractZKRegistryTest extends RegistryTestHelper {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(AbstractZKRegistryTest.class);
+
+ private static final AddingCompositeService servicesToTeardown =
+ new AddingCompositeService("teardown");
+ // static initializer guarantees it is always started
+ // ahead of any @BeforeClass methods
+ static {
+ servicesToTeardown.init(new Configuration());
+ servicesToTeardown.start();
+ }
+
+ @Rule
+ public final Timeout testTimeout = new Timeout(10000);
+
+ @Rule
+ public TestName methodName = new TestName();
+
+ protected static void addToTeardown(Service svc) {
+ servicesToTeardown.addService(svc);
+ }
+
+ @AfterClass
+ public static void teardownServices() throws IOException {
+ describe(LOG, "teardown of static services");
+ servicesToTeardown.close();
+ }
+
+ protected static MicroZookeeperService zookeeper;
+
+
+ @BeforeClass
+ public static void createZKServer() throws Exception {
+ File zkDir = new File("target/zookeeper");
+ FileUtils.deleteDirectory(zkDir);
+ assertTrue(zkDir.mkdirs());
+ zookeeper = new MicroZookeeperService("InMemoryZKService");
+ YarnConfiguration conf = new YarnConfiguration();
+ conf.set(MicroZookeeperServiceKeys.KEY_ZKSERVICE_DIR, zkDir.getAbsolutePath());
+ zookeeper.init(conf);
+ zookeeper.start();
+ addToTeardown(zookeeper);
+ }
+
+ /**
+ * Give the test thread a recognizable name
+ */
+ @Before
+ public void nameThread() {
+ Thread.currentThread().setName("JUnit");
+ }
+
+ /**
+ * Returns the connection string to use
+ *
+ * @return connection string
+ */
+ public String getConnectString() {
+ return zookeeper.getConnectionString();
+ }
+
+ public YarnConfiguration createRegistryConfiguration() {
+ YarnConfiguration conf = new YarnConfiguration();
+ conf.setInt(RegistryConstants.KEY_REGISTRY_ZK_CONNECTION_TIMEOUT, 1000);
+ conf.setInt(RegistryConstants.KEY_REGISTRY_ZK_RETRY_INTERVAL, 500);
+ conf.setInt(RegistryConstants.KEY_REGISTRY_ZK_RETRY_TIMES, 10);
+ conf.setInt(RegistryConstants.KEY_REGISTRY_ZK_RETRY_CEILING, 10);
+ conf.set(RegistryConstants.KEY_REGISTRY_ZK_QUORUM,
+ zookeeper.getConnectionString());
+ return conf;
+ }
+}
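
A brief sketch of what a subclass gets for free: the static @BeforeClass above has already started the in-process ZooKeeper, so a test only needs createRegistryConfiguration() to obtain a configuration bound to it. The class and method names below are hypothetical:

    public class ExampleZKTest extends AbstractZKRegistryTest {
      @Test
      public void testQuorumPointsAtMicroZK() throws Throwable {
        YarnConfiguration conf = createRegistryConfiguration();
        // the quorum entry is set to the in-process server's address
        assertEquals(getConnectString(),
            conf.get(RegistryConstants.KEY_REGISTRY_ZK_QUORUM));
      }
    }
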
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/RegistryTestHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/RegistryTestHelper.java
new file mode 100644
index 0000000000..38cc2cb726
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/RegistryTestHelper.java
@@ -0,0 +1,401 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.registry.client.api.RegistryConstants;
+import org.apache.hadoop.registry.client.binding.RegistryUtils;
+import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;
+import org.apache.hadoop.registry.client.types.AddressTypes;
+import org.apache.hadoop.registry.client.types.Endpoint;
+import org.apache.hadoop.registry.client.types.ProtocolTypes;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes;
+import org.apache.hadoop.registry.secure.AbstractSecureRegistryTest;
+import org.apache.zookeeper.common.PathUtils;
+import org.junit.Assert;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.security.auth.Subject;
+import javax.security.auth.login.LoginContext;
+import javax.security.auth.login.LoginException;
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.registry.client.binding.RegistryTypeUtils.inetAddrEndpoint;
+import static org.apache.hadoop.registry.client.binding.RegistryTypeUtils.ipcEndpoint;
+import static org.apache.hadoop.registry.client.binding.RegistryTypeUtils.restEndpoint;
+import static org.apache.hadoop.registry.client.binding.RegistryTypeUtils.tuple;
+import static org.apache.hadoop.registry.client.binding.RegistryTypeUtils.webEndpoint;
+
+/**
+ * This is a set of static methods to aid testing the registry operations.
+ * The methods can be imported statically, or the class used as a base
+ * class for tests.
+ */
+public class RegistryTestHelper extends Assert {
+ public static final String SC_HADOOP = "org-apache-hadoop";
+ public static final String USER = "devteam/";
+ public static final String NAME = "hdfs";
+ public static final String API_WEBHDFS = "org_apache_hadoop_namenode_webhdfs";
+ public static final String API_HDFS = "org_apache_hadoop_namenode_dfs";
+ public static final String USERPATH = RegistryConstants.PATH_USERS + USER;
+ public static final String PARENT_PATH = USERPATH + SC_HADOOP + "/";
+ public static final String ENTRY_PATH = PARENT_PATH + NAME;
+ public static final String NNIPC = "nnipc";
+ public static final String IPC2 = "IPC2";
+ private static final Logger LOG =
+ LoggerFactory.getLogger(RegistryTestHelper.class);
+ public static final String KTUTIL = "ktutil";
+ private static final RegistryUtils.ServiceRecordMarshal recordMarshal =
+ new RegistryUtils.ServiceRecordMarshal();
+
+ /**
+ * Assert the path is valid by ZK rules
+ * @param path path to check
+ */
+ public static void assertValidZKPath(String path) {
+ try {
+ PathUtils.validatePath(path);
+ } catch (IllegalArgumentException e) {
+ throw new IllegalArgumentException("Invalid Path " + path + ": " + e, e);
+ }
+ }
+
+ /**
+ * Assert that a string is not empty (null or "")
+ * @param message message to raise if the string is empty
+ * @param check string to check
+ */
+ public static void assertNotEmpty(String message, String check) {
+ if (StringUtils.isEmpty(check)) {
+ fail(message);
+ }
+ }
+
+ /**
+ * Assert that a string is not empty (null or "")
+ * @param check string to check
+ */
+ public static void assertNotEmpty(String check) {
+ if (StringUtils.isEmpty(check)) {
+ fail("Empty string");
+ }
+ }
+
+ /**
+ * Log the details of a login context
+ * @param name name to assert that the user is logged in as
+ * @param loginContext the login context
+ */
+ public static void logLoginDetails(String name,
+ LoginContext loginContext) {
+ assertNotNull("Null login context", loginContext);
+ Subject subject = loginContext.getSubject();
+ LOG.info("Logged in as {}:\n {}", name, subject);
+ }
+
+ /**
+ * Set the JVM property to enable Kerberos debugging
+ */
+ public static void enableKerberosDebugging() {
+ System.setProperty(AbstractSecureRegistryTest.SUN_SECURITY_KRB5_DEBUG,
+ "true");
+ }
+ /**
+ * Set the JVM property to disable Kerberos debugging
+ */
+ public static void disableKerberosDebugging() {
+ System.setProperty(AbstractSecureRegistryTest.SUN_SECURITY_KRB5_DEBUG,
+ "false");
+ }
+
+ /**
+ * General code to validate bits of a component/service entry built with
+ * {@link #addSampleEndpoints(ServiceRecord, String)}
+ * @param record instance to check
+ */
+ public static void validateEntry(ServiceRecord record) {
+ assertNotNull("null service record", record);
+ List<Endpoint> endpoints = record.external;
+ assertEquals(2, endpoints.size());
+
+ Endpoint webhdfs = findEndpoint(record, API_WEBHDFS, true, 1, 1);
+ assertEquals(API_WEBHDFS, webhdfs.api);
+ assertEquals(AddressTypes.ADDRESS_URI, webhdfs.addressType);
+ assertEquals(ProtocolTypes.PROTOCOL_REST, webhdfs.protocolType);
+ List<List<String>> addressList = webhdfs.addresses;
+ List<String> url = addressList.get(0);
+ String addr = url.get(0);
+ assertTrue(addr.contains("http"));
+ assertTrue(addr.contains(":8020"));
+
+ Endpoint nnipc = findEndpoint(record, NNIPC, false, 1, 2);
+ assertEquals("wrong protocol in " + nnipc, ProtocolTypes.PROTOCOL_THRIFT,
+ nnipc.protocolType);
+
+ Endpoint ipc2 = findEndpoint(record, IPC2, false, 1, 2);
+
+ Endpoint web = findEndpoint(record, "web", true, 1, 1);
+ assertEquals(1, web.addresses.size());
+ assertEquals(1, web.addresses.get(0).size());
+ }
+
+ /**
+ * Assert that an endpoint matches the criteria
+ * @param endpoint endpoint to examine
+ * @param addressType expected address type
+ * @param protocolType expected protocol type
+ * @param api API
+ */
+ public static void assertMatches(Endpoint endpoint,
+ String addressType,
+ String protocolType,
+ String api) {
+ assertNotNull(endpoint);
+ assertEquals(addressType, endpoint.addressType);
+ assertEquals(protocolType, endpoint.protocolType);
+ assertEquals(api, endpoint.api);
+ }
+
+ /**
+ * Assert the records match.
+ * @param source record that was written
+ * @param resolved the one that resolved.
+ */
+ public static void assertMatches(ServiceRecord source, ServiceRecord resolved) {
+ assertNotNull("Null source record ", source);
+ assertNotNull("Null resolved record ", resolved);
+ assertEquals(source.description, resolved.description);
+
+ Map<String, String> srcAttrs = source.attributes();
+ Map<String, String> resolvedAttrs = resolved.attributes();
+ String sourceAsString = source.toString();
+ String resolvedAsString = resolved.toString();
+ assertEquals("Wrong count of attrs in \n" + sourceAsString
+ + "\nfrom\n" + resolvedAsString,
+ srcAttrs.size(),
+ resolvedAttrs.size());
+ for (Map.Entry<String, String> entry : srcAttrs.entrySet()) {
+ String attr = entry.getKey();
+ assertEquals("attribute " + attr, entry.getValue(), resolved.get(attr));
+ }
+ assertEquals("wrong external endpoint count",
+ source.external.size(), resolved.external.size());
+ assertEquals("wrong external endpoint count",
+ source.internal.size(), resolved.internal.size());
+ }
+
+ /**
+ * Find an endpoint in a record or fail.
+ * @param record record
+ * @param api API
+ * @param external external?
+ * @param addressElements expected number of addresses
+ * @param addressTupleSize expected number of elements in each address tuple
+ * @return the endpoint.
+ */
+ public static Endpoint findEndpoint(ServiceRecord record,
+ String api, boolean external, int addressElements, int addressTupleSize) {
+ Endpoint epr = external ? record.getExternalEndpoint(api)
+ : record.getInternalEndpoint(api);
+ if (epr != null) {
+ assertEquals("wrong # of addresses",
+ addressElements, epr.addresses.size());
+ assertEquals("wrong # of elements in an address tuple",
+ addressTupleSize, epr.addresses.get(0).size());
+ return epr;
+ }
+ List<Endpoint> endpoints = external ? record.external : record.internal;
+ StringBuilder builder = new StringBuilder();
+ for (Endpoint endpoint : endpoints) {
+ builder.append("\"").append(endpoint).append("\" ");
+ }
+ fail("Did not find " + api + " in endpoints " + builder);
+ // never reached; here to keep the compiler happy
+ return null;
+ }
+
+ /**
+ * Log a record
+ * @param name record name
+ * @param record details
+ * @throws IOException only if something bizarre goes wrong marshalling
+ * a record.
+ */
+ public static void logRecord(String name, ServiceRecord record) throws
+ IOException {
+ LOG.info(" {} = \n{}\n", name, recordMarshal.toJson(record));
+ }
+
+ /**
+ * Create a service entry with the sample endpoints
+ * @param persistence persistence policy
+ * @return the record
+ * @throws IOException on a failure
+ */
+ public static ServiceRecord buildExampleServiceEntry(String persistence) throws
+ IOException,
+ URISyntaxException {
+ ServiceRecord record = new ServiceRecord();
+ record.set(YarnRegistryAttributes.YARN_ID, "example-0001");
+ record.set(YarnRegistryAttributes.YARN_PERSISTENCE, persistence);
+ addSampleEndpoints(record, "namenode");
+ return record;
+ }
+
+ /**
+ * Add the sample endpoints validated by {@link #validateEntry(ServiceRecord)}
+ * @param entry entry to add the endpoints to
+ * @param hostname hostname to use in the endpoint addresses
+ */
+ public static void addSampleEndpoints(ServiceRecord entry, String hostname)
+ throws URISyntaxException {
+ assertNotNull(hostname);
+ entry.addExternalEndpoint(webEndpoint("web",
+ new URI("http", hostname + ":80", "/")));
+ entry.addExternalEndpoint(
+ restEndpoint(API_WEBHDFS,
+ new URI("http", hostname + ":8020", "/")));
+
+ Endpoint endpoint = ipcEndpoint(API_HDFS, true, null);
+ endpoint.addresses.add(tuple(hostname, "8030"));
+ entry.addInternalEndpoint(endpoint);
+ InetSocketAddress localhost = new InetSocketAddress("localhost", 8050);
+ entry.addInternalEndpoint(
+ inetAddrEndpoint(NNIPC, ProtocolTypes.PROTOCOL_THRIFT, "localhost",
+ 8050));
+ entry.addInternalEndpoint(
+ RegistryTypeUtils.ipcEndpoint(
+ IPC2,
+ true,
+ RegistryTypeUtils.marshall(localhost)));
+ }
+
+ /**
+ * Describe the stage in the process with a box around it, so as
+ * to highlight it in test logs
+ * @param log log to use
+ * @param text text
+ * @param args logger args
+ */
+ public static void describe(Logger log, String text, Object...args) {
+ log.info("\n=======================================");
+ log.info(text, args);
+ log.info("=======================================\n");
+ }
+
+ /**
+ * Log out from a login context if it is non-null; exceptions are caught and logged
+ * @param login login context
+ * @return null, always
+ */
+ public static LoginContext logout(LoginContext login) {
+ try {
+ if (login != null) {
+ LOG.debug("Logging out login context {}", login.toString());
+ login.logout();
+ }
+ } catch (LoginException e) {
+ LOG.warn("Exception logging out: {}", e, e);
+ }
+ return null;
+ }
+
+ /**
+ * Exec the native ktutil to list the keys
+ * (primarily to verify that the generated keytabs are compatible).
+ * This operation is not executed on Windows. On other platforms
+ * it requires ktutil to be installed and on the path.
+ *
+ * ktutil --keytab=target/kdc/zookeeper.keytab list --keys
+ *
+ * @param keytab keytab to list
+ * @throws IOException on any execution problem, including the executable
+ * being missing
+ */
+ public static String ktList(File keytab) throws IOException {
+ if (!Shell.WINDOWS) {
+ String path = keytab.getAbsolutePath();
+ String out = Shell.execCommand(
+ KTUTIL,
+ "--keytab=" + path,
+ "list",
+ "--keys"
+ );
+ LOG.info("Listing of keytab {}:\n{}\n", path, out);
+ return out;
+ }
+ return "";
+ }
+
+ /**
+ * Perform a robust ktutil listing: exceptions are caught and
+ * ignored; otherwise the output is logged.
+ * @param keytab keytab to list
+ * @return the result of the operation, or "" on any problem
+ */
+ public static String ktListRobust(File keytab) {
+ try {
+ return ktList(keytab);
+ } catch (IOException e) {
+ // probably not on the path
+ return "";
+ }
+ }
+
+ /**
+ * Login via a UGI. Requires UGI to have been set up.
+ * @param user username
+ * @param keytab keytab to log in from
+ * @return the UGI
+ * @throws IOException
+ */
+ public static UserGroupInformation loginUGI(String user, File keytab) throws
+ IOException {
+ LOG.info("Logging in as {} from {}", user, keytab);
+ return UserGroupInformation.loginUserFromKeytabAndReturnUGI(user,
+ keytab.getAbsolutePath());
+ }
+
+ public static ServiceRecord createRecord(String persistence) {
+ return createRecord("01", persistence, "description");
+ }
+
+ public static ServiceRecord createRecord(String id, String persistence,
+ String description) {
+ ServiceRecord serviceRecord = new ServiceRecord();
+ serviceRecord.set(YarnRegistryAttributes.YARN_ID, id);
+ serviceRecord.description = description;
+ serviceRecord.set(YarnRegistryAttributes.YARN_PERSISTENCE, persistence);
+ return serviceRecord;
+ }
+
+ public static ServiceRecord createRecord(String id, String persistence,
+ String description, String data) {
+ return createRecord(id, persistence, description);
+ }
+}
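
As the class javadoc notes, the helper methods can also be imported statically. A sketch of that style, using only members defined above (the PERMANENT policy and the ServiceRecord copy constructor are both exercised by the tests in this patch):

    import static org.apache.hadoop.registry.RegistryTestHelper.*;

    ServiceRecord record = createRecord("01",
        PersistencePolicies.PERMANENT, "description");
    // a copy made via the ServiceRecord copy constructor matches the original
    assertMatches(record, new ServiceRecord(record));
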
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestMarshalling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestMarshalling.java
new file mode 100644
index 0000000000..14e3b1fa63
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestMarshalling.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.binding;
+
+import org.apache.hadoop.registry.RegistryTestHelper;
+import org.apache.hadoop.registry.client.exceptions.NoRecordException;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.hadoop.registry.client.types.ServiceRecordHeader;
+import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.EOFException;
+
+/**
+ * Test record marshalling
+ */
+public class TestMarshalling extends RegistryTestHelper {
+ private static final Logger
+ LOG = LoggerFactory.getLogger(TestMarshalling.class);
+
+ @Rule
+ public final Timeout testTimeout = new Timeout(10000);
+ @Rule
+ public TestName methodName = new TestName();
+ private static RegistryUtils.ServiceRecordMarshal marshal;
+
+ @BeforeClass
+ public static void setupClass() {
+ marshal = new RegistryUtils.ServiceRecordMarshal();
+ }
+
+ @Test
+ public void testRoundTrip() throws Throwable {
+ String persistence = PersistencePolicies.PERMANENT;
+ ServiceRecord record = createRecord(persistence);
+ record.set("customkey","customvalue");
+ record.set("customkey2","customvalue2");
+ LOG.info(marshal.toJson(record));
+ byte[] bytes = marshal.toBytes(record);
+ ServiceRecord r2 = marshal.fromBytes("", bytes, 0);
+ assertMatches(record, r2);
+ }
+
+ @Test
+ public void testRoundTripHeaders() throws Throwable {
+ ServiceRecord record = createRecord(PersistencePolicies.CONTAINER);
+ byte[] bytes = marshal.toByteswithHeader(record);
+ ServiceRecord r2 = marshal.fromBytesWithHeader("", bytes);
+ assertMatches(record, r2);
+
+ }
+
+ @Test(expected = NoRecordException.class)
+ public void testRoundTripBadHeaders() throws Throwable {
+ ServiceRecord record = createRecord(PersistencePolicies.APPLICATION);
+ byte[] bytes = marshal.toByteswithHeader(record);
+ bytes[1] = 0x01;
+ marshal.fromBytesWithHeader("src", bytes);
+ }
+
+ @Test(expected = NoRecordException.class)
+ public void testUnmarshallHeaderTooShort() throws Throwable {
+ marshal.fromBytesWithHeader("src", new byte[]{'a'});
+ }
+
+ @Test(expected = EOFException.class)
+ public void testUnmarshallNoBody() throws Throwable {
+ byte[] bytes = ServiceRecordHeader.getData();
+ marshal.fromBytesWithHeader("src", bytes);
+ }
+
+
+ @Test
+ public void testUnknownFieldsRoundTrip() throws Throwable {
+ ServiceRecord record =
+ createRecord(PersistencePolicies.APPLICATION_ATTEMPT);
+ record.set("key", "value");
+ record.set("intval", "2");
+ assertEquals("value", record.get("key"));
+ assertEquals("2", record.get("intval"));
+ assertNull(record.get("null"));
+ assertEquals("defval", record.get("null", "defval"));
+ byte[] bytes = marshal.toByteswithHeader(record);
+ ServiceRecord r2 = marshal.fromBytesWithHeader("", bytes);
+ assertEquals("value", r2.get("key"));
+ assertEquals("2", r2.get("intval"));
+ }
+
+ @Test
+ public void testFieldPropagationInCopy() throws Throwable {
+ ServiceRecord record =
+ createRecord(PersistencePolicies.APPLICATION_ATTEMPT);
+ record.set("key", "value");
+ record.set("intval", "2");
+ ServiceRecord that = new ServiceRecord(record);
+ assertMatches(record, that);
+ }
+
+}
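
The round-trip pattern these tests exercise, reduced to its essentials; the "src" argument is just a label used in error messages, and the record contents are arbitrary:

    RegistryUtils.ServiceRecordMarshal marshal =
        new RegistryUtils.ServiceRecordMarshal();
    ServiceRecord record = createRecord(PersistencePolicies.PERMANENT);
    byte[] bytes = marshal.toByteswithHeader(record);        // JSON body plus header
    ServiceRecord copy = marshal.fromBytesWithHeader("src", bytes);
    assertMatches(record, copy);                             // attributes and endpoints survive
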
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestRegistryOperationUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestRegistryOperationUtils.java
new file mode 100644
index 0000000000..b86e3fe5e0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestRegistryOperationUtils.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.binding;
+
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Tests for the {@link RegistryUtils} class
+ */
+public class TestRegistryOperationUtils extends Assert {
+
+ @Test
+ public void testUsernameExtractionEnvVarOverrride() throws Throwable {
+ String whoami = RegistryUtils.getCurrentUsernameUnencoded("drwho");
+ assertEquals("drwho", whoami);
+
+ }
+
+ @Test
+ public void testUsernameExtractionCurrentuser() throws Throwable {
+ String whoami = RegistryUtils.getCurrentUsernameUnencoded("");
+ String ugiUser = UserGroupInformation.getCurrentUser().getShortUserName();
+
+ assertEquals(ugiUser, whoami);
+
+ }
+
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestRegistryPathUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestRegistryPathUtils.java
new file mode 100644
index 0000000000..9a24f1c9c8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestRegistryPathUtils.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.binding;
+
+import static org.apache.hadoop.registry.client.binding.RegistryPathUtils.*;
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.registry.client.exceptions.InvalidPathnameException;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestRegistryPathUtils extends Assert {
+
+
+ public static final String EURO = "\u20AC";
+
+ @Test
+ public void testFormatAscii() throws Throwable {
+
+ String in = "hostname01101101-1";
+ assertConverted(in, in);
+ }
+
+ /*
+ * Euro symbol
+ */
+ @Test
+ public void testFormatEuroSymbol() throws Throwable {
+ assertConverted("xn--lzg", EURO);
+ }
+
+ @Test
+ public void testFormatIdempotent() throws Throwable {
+ assertConverted("xn--lzg", RegistryPathUtils.encodeForRegistry(EURO));
+ }
+
+ @Test
+ public void testFormatCyrillicSpaced() throws Throwable {
+ assertConverted("xn--pa 3-k4di", "\u0413PA\u0414 3");
+ }
+
+ protected void assertConverted(String expected, String in) {
+ String out = RegistryPathUtils.encodeForRegistry(in);
+ assertEquals("Conversion of " + in, expected, out);
+ }
+
+ @Test
+ public void testPaths() throws Throwable {
+ assertCreatedPathEquals("/", "/", "");
+ assertCreatedPathEquals("/", "", "");
+ assertCreatedPathEquals("/", "", "/");
+ assertCreatedPathEquals("/", "/", "/");
+
+ assertCreatedPathEquals("/a", "/a", "");
+ assertCreatedPathEquals("/a", "/", "a");
+ assertCreatedPathEquals("/a/b", "/a", "b");
+ assertCreatedPathEquals("/a/b", "/a/", "b");
+ assertCreatedPathEquals("/a/b", "/a", "/b");
+ assertCreatedPathEquals("/a/b", "/a", "/b/");
+ assertCreatedPathEquals("/a", "/a", "/");
+ assertCreatedPathEquals("/alice", "/", "/alice");
+ assertCreatedPathEquals("/alice", "/alice", "/");
+ }
+
+ @Test
+ public void testComplexPaths() throws Throwable {
+ assertCreatedPathEquals("/", "", "");
+ assertCreatedPathEquals("/yarn/registry/users/hadoop/org-apache-hadoop",
+ "/yarn/registry",
+ "users/hadoop/org-apache-hadoop/");
+ }
+
+
+ private static void assertCreatedPathEquals(String expected, String base,
+ String path) throws IOException {
+ String fullPath = createFullPath(base, path);
+ assertEquals("\"" + base + "\" + \"" + path + "\" =\"" + fullPath + "\"",
+ expected, fullPath);
+ }
+
+ @Test
+ public void testSplittingEmpty() throws Throwable {
+ assertEquals(0, split("").size());
+ assertEquals(0, split("/").size());
+ assertEquals(0, split("///").size());
+ }
+
+
+ @Test
+ public void testSplitting() throws Throwable {
+ assertEquals(1, split("/a").size());
+ assertEquals(0, split("/").size());
+ assertEquals(3, split("/a/b/c").size());
+ assertEquals(3, split("/a/b/c/").size());
+ assertEquals(3, split("a/b/c").size());
+ assertEquals(3, split("/a/b//c").size());
+ assertEquals(3, split("//a/b/c/").size());
+ List split = split("//a/b/c/");
+ assertEquals("a", split.get(0));
+ assertEquals("b", split.get(1));
+ assertEquals("c", split.get(2));
+ }
+
+ @Test
+ public void testParentOf() throws Throwable {
+ assertEquals("/", parentOf("/a"));
+ assertEquals("/", parentOf("/a/"));
+ assertEquals("/a", parentOf("/a/b"));
+ assertEquals("/a/b", parentOf("/a/b/c"));
+ }
+
+ @Test
+ public void testLastPathEntry() throws Throwable {
+ assertEquals("",lastPathEntry("/"));
+ assertEquals("",lastPathEntry("//"));
+ assertEquals("c",lastPathEntry("/a/b/c"));
+ assertEquals("c",lastPathEntry("/a/b/c/"));
+ }
+
+ @Test(expected = PathNotFoundException.class)
+ public void testParentOfRoot() throws Throwable {
+ parentOf("/");
+ }
+
+ @Test
+ public void testValidPaths() throws Throwable {
+ assertValidPath("/");
+ assertValidPath("/a/b/c");
+ assertValidPath("/users/drwho/org-apache-hadoop/registry/appid-55-55");
+ assertValidPath("/a50");
+ }
+
+ @Test
+ public void testInvalidPaths() throws Throwable {
+ assertInvalidPath("/a_b");
+ assertInvalidPath("/UpperAndLowerCase");
+ assertInvalidPath("/space in string");
+// Is this valid? assertInvalidPath("/50");
+ }
+
+
+ private void assertValidPath(String path) throws InvalidPathnameException {
+ validateZKPath(path);
+ }
+
+
+ private void assertInvalidPath(String path) throws InvalidPathnameException {
+ try {
+ validateElementsAsDNS(path);
+ fail("path considered valid: " + path);
+ } catch (InvalidPathnameException expected) {
+ // expected
+ }
+ }
+
+
+}
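
A condensed view of the path-utility contracts the tests above pin down, with all methods statically imported from RegistryPathUtils:

    assertEquals("/a/b", createFullPath("/a", "b"));   // joins and normalizes
    assertEquals("/a", parentOf("/a/b"));              // parentOf("/") throws PathNotFoundException
    assertEquals("c", lastPathEntry("/a/b/c/"));       // trailing slash ignored
    assertEquals(3, split("//a/b/c/").size());         // empty elements dropped
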
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/impl/CuratorEventCatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/impl/CuratorEventCatcher.java
new file mode 100644
index 0000000000..254ab79c6f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/impl/CuratorEventCatcher.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.impl;
+
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.api.BackgroundCallback;
+import org.apache.curator.framework.api.CuratorEvent;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * This is a little event catcher for curator asynchronous
+ * operations.
+ */
+public class CuratorEventCatcher implements BackgroundCallback {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(CuratorEventCatcher.class);
+
+ public final BlockingQueue<CuratorEvent>
+ events = new LinkedBlockingQueue<CuratorEvent>(1);
+
+ private final AtomicInteger eventCounter = new AtomicInteger(0);
+
+
+ @Override
+ public void processResult(CuratorFramework client,
+ CuratorEvent event) throws
+ Exception {
+ LOG.info("received {}", event);
+ eventCounter.incrementAndGet();
+ events.put(event);
+ }
+
+
+ public int getCount() {
+ return eventCounter.get();
+ }
+
+ /**
+ * Blocking operation to take the first event off the queue
+ * @return the first event on the queue, when it arrives
+ * @throws InterruptedException if interrupted
+ */
+ public CuratorEvent take() throws InterruptedException {
+ return events.take();
+ }
+}
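
Usage sketch, mirroring the testBackgroundDelete case in the next file: pass the catcher as the background callback, then block on take() until the asynchronous operation completes:

    CuratorEventCatcher events = new CuratorEventCatcher();
    curatorService.zkDelete("/rm", true, events);   // asynchronous recursive delete
    CuratorEvent taken = events.take();             // blocks until the callback fires
    assertEquals(1, events.getCount());
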
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/impl/TestCuratorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/impl/TestCuratorService.java
new file mode 100644
index 0000000000..3c8b1d12bf
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/impl/TestCuratorService.java
@@ -0,0 +1,249 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.impl;
+
+import org.apache.curator.framework.api.CuratorEvent;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.service.ServiceOperations;
+import org.apache.hadoop.registry.AbstractZKRegistryTest;
+import org.apache.hadoop.registry.client.impl.zk.CuratorService;
+import org.apache.hadoop.registry.client.impl.zk.RegistrySecurity;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.data.ACL;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Test the curator service
+ */
+public class TestCuratorService extends AbstractZKRegistryTest {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestCuratorService.class);
+
+
+ protected CuratorService curatorService;
+
+ public static final String MISSING = "/missing";
+ private List<ACL> rootACL;
+
+ @Before
+ public void startCurator() throws IOException {
+ createCuratorService();
+ }
+
+ @After
+ public void stopCurator() {
+ ServiceOperations.stop(curatorService);
+ }
+
+ /**
+ * Create an instance
+ */
+ protected void createCuratorService() throws IOException {
+ curatorService = new CuratorService("curatorService");
+ curatorService.init(createRegistryConfiguration());
+ curatorService.start();
+ rootACL = RegistrySecurity.WorldReadWriteACL;
+ curatorService.maybeCreate("", CreateMode.PERSISTENT, rootACL, true);
+ }
+
+ @Test
+ public void testLs() throws Throwable {
+ curatorService.zkList("/");
+ }
+
+ @Test(expected = PathNotFoundException.class)
+ public void testLsNotFound() throws Throwable {
+ List<String> ls = curatorService.zkList(MISSING);
+ }
+
+ @Test
+ public void testExists() throws Throwable {
+ assertTrue(curatorService.zkPathExists("/"));
+ }
+
+ @Test
+ public void testExistsMissing() throws Throwable {
+ assertFalse(curatorService.zkPathExists(MISSING));
+ }
+
+ @Test
+ public void testVerifyExists() throws Throwable {
+ pathMustExist("/");
+ }
+
+ @Test(expected = PathNotFoundException.class)
+ public void testVerifyExistsMissing() throws Throwable {
+ pathMustExist("/file-not-found");
+ }
+
+ @Test
+ public void testMkdirs() throws Throwable {
+ mkPath("/p1", CreateMode.PERSISTENT);
+ pathMustExist("/p1");
+ mkPath("/p1/p2", CreateMode.EPHEMERAL);
+ pathMustExist("/p1/p2");
+ }
+
+ private void mkPath(String path, CreateMode mode) throws IOException {
+ curatorService.zkMkPath(path, mode, false,
+ RegistrySecurity.WorldReadWriteACL);
+ }
+
+ public void pathMustExist(String path) throws IOException {
+ curatorService.zkPathMustExist(path);
+ }
+
+ @Test(expected = PathNotFoundException.class)
+ public void testMkdirChild() throws Throwable {
+ mkPath("/testMkdirChild/child", CreateMode.PERSISTENT);
+ }
+
+ @Test
+ public void testMaybeCreate() throws Throwable {
+ assertTrue(curatorService.maybeCreate("/p3", CreateMode.PERSISTENT,
+ RegistrySecurity.WorldReadWriteACL, false));
+ assertFalse(curatorService.maybeCreate("/p3", CreateMode.PERSISTENT,
+ RegistrySecurity.WorldReadWriteACL, false));
+ }
+
+ @Test
+ public void testRM() throws Throwable {
+ mkPath("/rm", CreateMode.PERSISTENT);
+ curatorService.zkDelete("/rm", false, null);
+ verifyNotExists("/rm");
+ curatorService.zkDelete("/rm", false, null);
+ }
+
+ @Test
+ public void testRMNonRf() throws Throwable {
+ mkPath("/rm", CreateMode.PERSISTENT);
+ mkPath("/rm/child", CreateMode.PERSISTENT);
+ try {
+ curatorService.zkDelete("/rm", false, null);
+ fail("expected a failure");
+ } catch (PathIsNotEmptyDirectoryException expected) {
+
+ }
+ }
+
+ @Test
+ public void testRMRf() throws Throwable {
+ mkPath("/rm", CreateMode.PERSISTENT);
+ mkPath("/rm/child", CreateMode.PERSISTENT);
+ curatorService.zkDelete("/rm", true, null);
+ verifyNotExists("/rm");
+ curatorService.zkDelete("/rm", true, null);
+ }
+
+
+ @Test
+ public void testBackgroundDelete() throws Throwable {
+ mkPath("/rm", CreateMode.PERSISTENT);
+ mkPath("/rm/child", CreateMode.PERSISTENT);
+ CuratorEventCatcher events = new CuratorEventCatcher();
+ curatorService.zkDelete("/rm", true, events);
+ CuratorEvent taken = events.take();
+ LOG.info("took {}", taken);
+ assertEquals(1, events.getCount());
+ }
+
+ @Test
+ public void testCreate() throws Throwable {
+
+ curatorService.zkCreate("/testcreate",
+ CreateMode.PERSISTENT, getTestBuffer(),
+ rootACL
+ );
+ pathMustExist("/testcreate");
+ }
+
+ @Test
+ public void testCreateTwice() throws Throwable {
+ byte[] buffer = getTestBuffer();
+ curatorService.zkCreate("/testcreatetwice",
+ CreateMode.PERSISTENT, buffer,
+ rootACL);
+ try {
+ curatorService.zkCreate("/testcreatetwice",
+ CreateMode.PERSISTENT, buffer,
+ rootACL);
+ fail("expected a FileAlreadyExistsException");
+ } catch (FileAlreadyExistsException e) {
+
+ }
+ }
+
+ @Test
+ public void testCreateUpdate() throws Throwable {
+ byte[] buffer = getTestBuffer();
+ curatorService.zkCreate("/testcreateupdate",
+ CreateMode.PERSISTENT, buffer,
+ rootACL
+ );
+ curatorService.zkUpdate("/testcreateupdate", buffer);
+ }
+
+ @Test(expected = PathNotFoundException.class)
+ public void testUpdateMissing() throws Throwable {
+ curatorService.zkUpdate("/testupdatemissing", getTestBuffer());
+ }
+
+ @Test
+ public void testUpdateDirectory() throws Throwable {
+ mkPath("/testupdatedirectory", CreateMode.PERSISTENT);
+ curatorService.zkUpdate("/testupdatedirectory", getTestBuffer());
+ }
+
+ @Test
+ public void testUpdateDirectorywithChild() throws Throwable {
+ mkPath("/testupdatedirectorywithchild", CreateMode.PERSISTENT);
+ mkPath("/testupdatedirectorywithchild/child", CreateMode.PERSISTENT);
+ curatorService.zkUpdate("/testupdatedirectorywithchild", getTestBuffer());
+ }
+
+ @Test
+ public void testUseZKServiceForBinding() throws Throwable {
+ CuratorService cs2 = new CuratorService("curator", zookeeper);
+ cs2.init(new Configuration());
+ cs2.start();
+ }
+
+ protected byte[] getTestBuffer() {
+ byte[] buffer = new byte[1];
+ buffer[0] = '0';
+ return buffer;
+ }
+
+
+ public void verifyNotExists(String path) throws IOException {
+ if (curatorService.zkPathExists(path)) {
+ fail("Path should not exist: " + path);
+ }
+ }
+}
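
Two semantics worth calling out from the tests above, in condensed form:

    // maybeCreate is idempotent: only the first call creates the node
    assertTrue(curatorService.maybeCreate("/p3", CreateMode.PERSISTENT,
        RegistrySecurity.WorldReadWriteACL, false));
    assertFalse(curatorService.maybeCreate("/p3", CreateMode.PERSISTENT,
        RegistrySecurity.WorldReadWriteACL, false));

    // recursive zkDelete removes children, and deleting a missing path is a no-op
    curatorService.zkDelete("/rm", true, null);
    curatorService.zkDelete("/rm", true, null);   // second call: no error
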
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/impl/TestMicroZookeeperService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/impl/TestMicroZookeeperService.java
new file mode 100644
index 0000000000..4dfe453207
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/impl/TestMicroZookeeperService.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.impl;
+
+import org.apache.hadoop.service.ServiceOperations;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.registry.server.services.MicroZookeeperService;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+import org.junit.rules.Timeout;
+
+import java.io.IOException;
+
+/**
+ * Simple tests to look at the micro ZK service itself
+ */
+public class TestMicroZookeeperService extends Assert {
+
+ private MicroZookeeperService zookeeper;
+
+ @Rule
+ public final Timeout testTimeout = new Timeout(10000);
+ @Rule
+ public TestName methodName = new TestName();
+
+ @After
+ public void destroyZKServer() throws IOException {
+
+ ServiceOperations.stop(zookeeper);
+ }
+
+ @Test
+ public void testTempDirSupport() throws Throwable {
+ YarnConfiguration conf = new YarnConfiguration();
+ zookeeper = new MicroZookeeperService("t1");
+ zookeeper.init(conf);
+ zookeeper.start();
+ zookeeper.stop();
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/integration/TestRegistryRMOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/integration/TestRegistryRMOperations.java
new file mode 100644
index 0000000000..451a69b695
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/integration/TestRegistryRMOperations.java
@@ -0,0 +1,369 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.integration;
+
+import org.apache.curator.framework.api.BackgroundCallback;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
+import org.apache.hadoop.registry.AbstractRegistryTest;
+import org.apache.hadoop.registry.client.api.BindFlags;
+import org.apache.hadoop.registry.client.api.RegistryConstants;
+import org.apache.hadoop.registry.client.binding.RegistryUtils;
+import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
+import org.apache.hadoop.registry.client.impl.zk.ZKPathDumper;
+import org.apache.hadoop.registry.client.impl.CuratorEventCatcher;
+import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
+import org.apache.hadoop.registry.client.types.RegistryPathStatus;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes;
+import org.apache.hadoop.registry.server.services.DeleteCompletionCallback;
+import org.apache.hadoop.registry.server.services.RegistryAdminService;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.Collection;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+
+import static org.apache.hadoop.registry.client.binding.RegistryTypeUtils.inetAddrEndpoint;
+import static org.apache.hadoop.registry.client.binding.RegistryTypeUtils.restEndpoint;
+
+public class TestRegistryRMOperations extends AbstractRegistryTest {
+ protected static final Logger LOG =
+ LoggerFactory.getLogger(TestRegistryRMOperations.class);
+
+ /**
+ * Trigger a purge operation
+ * @param path path
+ * @param id yarn ID
+ * @param policyMatch policy to match ID on
+ * @param purgePolicy policy when there are children under a match
+ * @return the number purged
+ * @throws IOException
+ */
+ public int purge(String path,
+ String id,
+ String policyMatch,
+ RegistryAdminService.PurgePolicy purgePolicy) throws
+ IOException,
+ ExecutionException,
+ InterruptedException {
+ return purge(path, id, policyMatch, purgePolicy, null);
+ }
+
+ /**
+ * Trigger a purge operation
+ * @param path path
+ * @param id yarn ID
+ * @param policyMatch policy to match ID on
+ * @param purgePolicy policy when there are children under a match
+ * @param callback optional callback
+ * @return the number purged
+ * @throws IOException
+ */
+ public int purge(String path,
+ String id,
+ String policyMatch,
+ RegistryAdminService.PurgePolicy purgePolicy,
+ BackgroundCallback callback) throws
+ IOException,
+ ExecutionException,
+ InterruptedException {
+
+ Future<Integer> future = registry.purgeRecordsAsync(path,
+ id, policyMatch, purgePolicy, callback);
+ try {
+ return future.get();
+ } catch (ExecutionException e) {
+ if (e.getCause() instanceof IOException) {
+ throw (IOException) e.getCause();
+ } else {
+ throw e;
+ }
+ }
+ }
+
+ @Test
+ public void testPurgeEntryCuratorCallback() throws Throwable {
+
+ String path = "/users/example/hbase/hbase1/";
+ ServiceRecord written = buildExampleServiceEntry(
+ PersistencePolicies.APPLICATION_ATTEMPT);
+ written.set(YarnRegistryAttributes.YARN_ID,
+ "testAsyncPurgeEntry_attempt_001");
+
+ operations.mknode(RegistryPathUtils.parentOf(path), true);
+ operations.bind(path, written, 0);
+
+ ZKPathDumper dump = registry.dumpPath(false);
+ CuratorEventCatcher events = new CuratorEventCatcher();
+
+ LOG.info("Initial state {}", dump);
+
+ // container query
+ String id = written.get(YarnRegistryAttributes.YARN_ID, "");
+ int opcount = purge("/",
+ id,
+ PersistencePolicies.CONTAINER,
+ RegistryAdminService.PurgePolicy.PurgeAll,
+ events);
+ assertPathExists(path);
+ assertEquals(0, opcount);
+ assertEquals("Event counter", 0, events.getCount());
+
+ // now the application attempt
+ opcount = purge("/",
+ id,
+ PersistencePolicies.APPLICATION_ATTEMPT,
+ RegistryAdminService.PurgePolicy.PurgeAll,
+ events);
+
+ LOG.info("Final state {}", dump);
+
+ assertPathNotFound(path);
+ assertEquals("wrong no of delete operations in " + dump, 1, opcount);
+ // and validate the callback event
+ assertEquals("Event counter", 1, events.getCount());
+ }
+
+ @Test
+ public void testAsyncPurgeEntry() throws Throwable {
+
+ String path = "/users/example/hbase/hbase1/";
+ ServiceRecord written = buildExampleServiceEntry(
+ PersistencePolicies.APPLICATION_ATTEMPT);
+ written.set(YarnRegistryAttributes.YARN_ID,
+ "testAsyncPurgeEntry_attempt_001");
+
+ operations.mknode(RegistryPathUtils.parentOf(path), true);
+ operations.bind(path, written, 0);
+
+ ZKPathDumper dump = registry.dumpPath(false);
+
+ LOG.info("Initial state {}", dump);
+
+ DeleteCompletionCallback deletions = new DeleteCompletionCallback();
+ int opcount = purge("/",
+ written.get(YarnRegistryAttributes.YARN_ID, ""),
+ PersistencePolicies.CONTAINER,
+ RegistryAdminService.PurgePolicy.PurgeAll,
+ deletions);
+ assertPathExists(path);
+
+ dump = registry.dumpPath(false);
+
+ assertEquals("wrong no of delete operations in " + dump, 0,
+ deletions.getEventCount());
+ assertEquals("wrong no of delete operations in " + dump, 0, opcount);
+
+
+ // now app attempt
+ deletions = new DeleteCompletionCallback();
+ opcount = purge("/",
+ written.get(YarnRegistryAttributes.YARN_ID, ""),
+ PersistencePolicies.APPLICATION_ATTEMPT,
+ RegistryAdminService.PurgePolicy.PurgeAll,
+ deletions);
+
+ dump = registry.dumpPath(false);
+ LOG.info("Final state {}", dump);
+
+ assertPathNotFound(path);
+ assertEquals("wrong no of delete operations in " + dump, 1,
+ deletions.getEventCount());
+ assertEquals("wrong no of delete operations in " + dump, 1, opcount);
+ // and validate the callback event
+
+ }
+
+ @Test
+ public void testPutGetContainerPersistenceServiceEntry() throws Throwable {
+
+ String path = ENTRY_PATH;
+ ServiceRecord written = buildExampleServiceEntry(
+ PersistencePolicies.CONTAINER);
+
+ operations.mknode(RegistryPathUtils.parentOf(path), true);
+ operations.bind(path, written, BindFlags.CREATE);
+ ServiceRecord resolved = operations.resolve(path);
+ validateEntry(resolved);
+ assertMatches(written, resolved);
+ }
+
+ /**
+ * Create a complex example app
+ * @throws Throwable
+ */
+ @Test
+ public void testCreateComplexApplication() throws Throwable {
+ String appId = "application_1408631738011_0001";
+ String cid = "container_1408631738011_0001_01_";
+ String cid1 = cid + "000001";
+ String cid2 = cid + "000002";
+ String appPath = USERPATH + "tomcat";
+
+ ServiceRecord webapp = createRecord(appId,
+ PersistencePolicies.APPLICATION, "tomcat-based web application",
+ null);
+ webapp.addExternalEndpoint(restEndpoint("www",
+ new URI("http", "//loadbalancer/", null)));
+
+ ServiceRecord comp1 = createRecord(cid1, PersistencePolicies.CONTAINER,
+ null,
+ null);
+ comp1.addExternalEndpoint(restEndpoint("www",
+ new URI("http", "//rack4server3:43572", null)));
+ comp1.addInternalEndpoint(
+ inetAddrEndpoint("jmx", "JMX", "rack4server3", 43573));
+
+ // Component 2 has a container lifespan
+ ServiceRecord comp2 = createRecord(cid2, PersistencePolicies.CONTAINER,
+ null,
+ null);
+ comp2.addExternalEndpoint(restEndpoint("www",
+ new URI("http", "//rack1server28:35881", null)));
+ comp2.addInternalEndpoint(
+ inetAddrEndpoint("jmx", "JMX", "rack1server28", 35882));
+
+ operations.mknode(USERPATH, false);
+ operations.bind(appPath, webapp, BindFlags.OVERWRITE);
+ String componentsPath = appPath + RegistryConstants.SUBPATH_COMPONENTS;
+ operations.mknode(componentsPath, false);
+ String dns1 = RegistryPathUtils.encodeYarnID(cid1);
+ String dns1path = componentsPath + dns1;
+ operations.bind(dns1path, comp1, BindFlags.CREATE);
+ String dns2 = RegistryPathUtils.encodeYarnID(cid2);
+ String dns2path = componentsPath + dns2;
+ operations.bind(dns2path, comp2, BindFlags.CREATE);
+
+ ZKPathDumper pathDumper = registry.dumpPath(false);
+ LOG.info(pathDumper.toString());
+
+ logRecord("tomcat", webapp);
+ logRecord(dns1, comp1);
+ logRecord(dns2, comp2);
+
+ ServiceRecord dns1resolved = operations.resolve(dns1path);
+ assertEquals("Persistence policies on resolved entry",
+ PersistencePolicies.CONTAINER,
+ dns1resolved.get(YarnRegistryAttributes.YARN_PERSISTENCE, ""));
+
+ Map<String, RegistryPathStatus> children =
+ RegistryUtils.statChildren(operations, componentsPath);
+ assertEquals(2, children.size());
+ Collection<RegistryPathStatus> componentStats = children.values();
+ Map<String, ServiceRecord> records =
+ RegistryUtils.extractServiceRecords(operations,
+ componentsPath, componentStats);
+ assertEquals(2, records.size());
+ ServiceRecord retrieved1 = records.get(dns1path);
+ logRecord(retrieved1.get(YarnRegistryAttributes.YARN_ID, ""), retrieved1);
+ assertMatches(dns1resolved, retrieved1);
+ assertEquals(PersistencePolicies.CONTAINER,
+ retrieved1.get(YarnRegistryAttributes.YARN_PERSISTENCE, ""));
+
+ // create a listing under components/
+ operations.mknode(componentsPath + "subdir", false);
+
+ // this shows up in the listing of child entries
+ Map<String, RegistryPathStatus> childrenUpdated =
+ RegistryUtils.statChildren(operations, componentsPath);
+ assertEquals(3, childrenUpdated.size());
+
+ // the non-record child is not picked up in the record listing
+ Map<String, ServiceRecord> recordsUpdated =
+ RegistryUtils.extractServiceRecords(operations,
+ componentsPath,
+ childrenUpdated);
+ assertEquals(2, recordsUpdated.size());
+
+ // now do some deletions.
+
+ // synchronous delete container ID 2
+
+ // fail if the app policy is chosen
+ assertEquals(0, purge("/", cid2, PersistencePolicies.APPLICATION,
+ RegistryAdminService.PurgePolicy.FailOnChildren));
+ // succeed for container
+ assertEquals(1, purge("/", cid2, PersistencePolicies.CONTAINER,
+ RegistryAdminService.PurgePolicy.FailOnChildren));
+ assertPathNotFound(dns2path);
+ assertPathExists(dns1path);
+
+ // expect SkipOnChildren to skip the app entry, as it has children
+ assertEquals(0,
+ purge("/", appId, PersistencePolicies.APPLICATION,
+ RegistryAdminService.PurgePolicy.SkipOnChildren));
+ assertPathExists(appPath);
+ assertPathExists(dns1path);
+
+ // attempt to delete app with policy of fail on children
+ try {
+ int p = purge("/",
+ appId,
+ PersistencePolicies.APPLICATION,
+ RegistryAdminService.PurgePolicy.FailOnChildren);
+ fail("expected a failure, got a purge count of " + p);
+ } catch (PathIsNotEmptyDirectoryException expected) {
+ // expected
+ }
+ assertPathExists(appPath);
+ assertPathExists(dns1path);
+
+
+ // now trigger recursive delete
+ assertEquals(1,
+ purge("/", appId, PersistencePolicies.APPLICATION,
+ RegistryAdminService.PurgePolicy.PurgeAll));
+ assertPathNotFound(appPath);
+ assertPathNotFound(dns1path);
+
+ }
+
+ @Test
+ public void testChildDeletion() throws Throwable {
+ ServiceRecord app = createRecord("app1",
+ PersistencePolicies.APPLICATION, "app",
+ null);
+ ServiceRecord container = createRecord("container1",
+ PersistencePolicies.CONTAINER, "container",
+ null);
+
+ operations.bind("/app", app, BindFlags.OVERWRITE);
+ operations.bind("/app/container", container, BindFlags.OVERWRITE);
+
+ try {
+ int p = purge("/",
+ "app1",
+ PersistencePolicies.APPLICATION,
+ RegistryAdminService.PurgePolicy.FailOnChildren);
+ fail("expected a failure, got a purge count of " + p);
+ } catch (PathIsNotEmptyDirectoryException expected) {
+ // expected
+ }
+
+ }
+
+}
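
A condensed summary of the purge-policy behaviour these tests assert, assuming an APPLICATION record bound at /app with a CONTAINER record below it, as in testChildDeletion:

    // SkipOnChildren: the match is left alone because it has children; count is 0
    purge("/", "app1", PersistencePolicies.APPLICATION,
        RegistryAdminService.PurgePolicy.SkipOnChildren);
    // FailOnChildren: throws PathIsNotEmptyDirectoryException
    // PurgeAll: deletes the matching entry and its whole subtree; count is 1
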
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/integration/TestYarnPolicySelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/integration/TestYarnPolicySelector.java
new file mode 100644
index 0000000000..441b3d7e72
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/integration/TestYarnPolicySelector.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.integration;
+
+import org.apache.hadoop.registry.RegistryTestHelper;
+import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
+import org.apache.hadoop.registry.client.types.RegistryPathStatus;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.hadoop.registry.server.integration.SelectByYarnPersistence;
+import org.apache.hadoop.registry.server.services.RegistryAdminService;
+import org.junit.Test;
+
+public class TestYarnPolicySelector extends RegistryTestHelper {
+
+
+ private ServiceRecord record = createRecord("1",
+ PersistencePolicies.APPLICATION, "one",
+ null);
+ private RegistryPathStatus status = new RegistryPathStatus("/", 0, 0, 1);
+
+ public void assertSelected(boolean outcome,
+ RegistryAdminService.NodeSelector selector) {
+ boolean select = selector.shouldSelect("/", status, record);
+ assertEquals(selector.toString(), outcome, select);
+ }
+
+ @Test
+ public void testByContainer() throws Throwable {
+ assertSelected(false,
+ new SelectByYarnPersistence("1",
+ PersistencePolicies.CONTAINER));
+ }
+
+ @Test
+ public void testByApp() throws Throwable {
+ assertSelected(true,
+ new SelectByYarnPersistence("1",
+ PersistencePolicies.APPLICATION));
+ }
+
+
+ @Test
+ public void testByAppName() throws Throwable {
+ assertSelected(false,
+ new SelectByYarnPersistence("2",
+ PersistencePolicies.APPLICATION));
+ }
+
+}
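
The selection rule the three cases above pin down: SelectByYarnPersistence matches a node only when both the record's YARN_ID and its persistence attribute equal the constructor arguments. Using the status and record fields defined in the test class:

    RegistryAdminService.NodeSelector selector =
        new SelectByYarnPersistence("1", PersistencePolicies.APPLICATION);
    // true only for the record with id "1" and APPLICATION persistence
    boolean selected = selector.shouldSelect("/", status, record);
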
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/operations/TestRegistryOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/operations/TestRegistryOperations.java
new file mode 100644
index 0000000000..1cfb02553e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/operations/TestRegistryOperations.java
@@ -0,0 +1,304 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.operations;
+
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.registry.AbstractRegistryTest;
+import org.apache.hadoop.registry.client.api.BindFlags;
+import org.apache.hadoop.registry.client.binding.RegistryUtils;
+import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
+import org.apache.hadoop.registry.client.exceptions.NoRecordException;
+import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
+import org.apache.hadoop.registry.client.types.RegistryPathStatus;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class TestRegistryOperations extends AbstractRegistryTest {
+ protected static final Logger LOG =
+ LoggerFactory.getLogger(TestRegistryOperations.class);
+
+ @Test
+ public void testPutGetServiceEntry() throws Throwable {
+ ServiceRecord written = putExampleServiceEntry(ENTRY_PATH, 0,
+ PersistencePolicies.APPLICATION);
+ ServiceRecord resolved = operations.resolve(ENTRY_PATH);
+ validateEntry(resolved);
+ assertMatches(written, resolved);
+ }
+
+ @Test
+ public void testDeleteServiceEntry() throws Throwable {
+ putExampleServiceEntry(ENTRY_PATH, 0);
+ operations.delete(ENTRY_PATH, false);
+ }
+
+ @Test
+ public void testDeleteNonexistentEntry() throws Throwable {
+ operations.delete(ENTRY_PATH, false);
+ operations.delete(ENTRY_PATH, true);
+ }
+
+ @Test
+ public void testStat() throws Throwable {
+ putExampleServiceEntry(ENTRY_PATH, 0);
+ RegistryPathStatus stat = operations.stat(ENTRY_PATH);
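+ // a bound entry has a non-zero size and modification time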
+ assertTrue(stat.size > 0);
+ assertTrue(stat.time > 0);
+ assertEquals(NAME, stat.path);
+ }
+
+ @Test
+ public void testLsParent() throws Throwable {
+ ServiceRecord written = putExampleServiceEntry(ENTRY_PATH, 0);
+ RegistryPathStatus stat = operations.stat(ENTRY_PATH);
+
+ List<String> children = operations.list(PARENT_PATH);
+ assertEquals(1, children.size());
+ assertEquals(NAME, children.get(0));
+ Map<String, RegistryPathStatus> childStats =
+ RegistryUtils.statChildren(operations, PARENT_PATH);
+ assertEquals(1, childStats.size());
+ assertEquals(stat, childStats.get(NAME));
+
+ Map<String, ServiceRecord> records =
+ RegistryUtils.extractServiceRecords(operations,
+ PARENT_PATH,
+ childStats.values());
+ assertEquals(1, records.size());
+ ServiceRecord record = records.get(ENTRY_PATH);
+ assertNotNull(record);
+ record.validate();
+ assertMatches(written, record);
+
+ }
+
+ @Test
+ public void testDeleteNonEmpty() throws Throwable {
+ putExampleServiceEntry(ENTRY_PATH, 0);
+ try {
+ operations.delete(PARENT_PATH, false);
+ fail("Expected a failure");
+ } catch (PathIsNotEmptyDirectoryException expected) {
+ // expected; ignore
+ }
+ operations.delete(PARENT_PATH, true);
+ }
+
+ @Test(expected = PathNotFoundException.class)
+ public void testStatEmptyPath() throws Throwable {
+ operations.stat(ENTRY_PATH);
+ }
+
+ @Test(expected = PathNotFoundException.class)
+ public void testLsEmptyPath() throws Throwable {
+ operations.list(PARENT_PATH);
+ }
+
+ @Test(expected = PathNotFoundException.class)
+ public void testResolveEmptyPath() throws Throwable {
+ operations.resolve(ENTRY_PATH);
+ }
+
+ @Test
+ public void testMkdirNoParent() throws Throwable {
+ String path = ENTRY_PATH + "/missing";
+ try {
+ operations.mknode(path, false);
+ RegistryPathStatus stat = operations.stat(path);
+ fail("Got a status " + stat);
+ } catch (PathNotFoundException expected) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testDoubleMkdir() throws Throwable {
+ operations.mknode(USERPATH, false);
+ String path = USERPATH + "newentry";
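+ // the first mknode creates the node and returns true; the repeat is a no-op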
+ assertTrue(operations.mknode(path, false));
+ operations.stat(path);
+ assertFalse(operations.mknode(path, false));
+ }
+
+ @Test
+ public void testPutNoParent() throws Throwable {
+ ServiceRecord record = new ServiceRecord();
+ record.set(YarnRegistryAttributes.YARN_ID, "testPutNoParent");
+ String path = "/path/without/parent";
+ try {
+ operations.bind(path, record, 0);
+ // the bind unexpectedly succeeded;
+ // stat the path and fail with a diagnostic
+ RegistryPathStatus stat = operations.stat(path);
+ fail("Got a status " + stat);
+ } catch (PathNotFoundException expected) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testPutMinimalRecord() throws Throwable {
+ String path = "/path/with/minimal";
+ operations.mknode(path, true);
+ ServiceRecord record = new ServiceRecord();
+ operations.bind(path, record, BindFlags.OVERWRITE);
+ ServiceRecord resolve = operations.resolve(path);
+ assertMatches(record, resolve);
+
+ }
+
+ @Test(expected = PathNotFoundException.class)
+ public void testPutNoParent2() throws Throwable {
+ ServiceRecord record = new ServiceRecord();
+ record.set(YarnRegistryAttributes.YARN_ID, "testPutNoParent");
+ String path = "/path/without/parent";
+ operations.bind(path, record, 0);
+ }
+
+ @Test
+ public void testStatDirectory() throws Throwable {
+ String empty = "/empty";
+ operations.mknode(empty, false);
+ operations.stat(empty);
+ }
+
+ @Test
+ public void testStatRootPath() throws Throwable {
+ operations.mknode("/", false);
+ operations.stat("/");
+ operations.list("/");
+ operations.list("/");
+ }
+
+ @Test
+ public void testStatOneLevelDown() throws Throwable {
+ operations.mknode("/subdir", true);
+ operations.stat("/subdir");
+ }
+
+ @Test
+ public void testLsRootPath() throws Throwable {
+ String empty = "/";
+ operations.mknode(empty, false);
+ operations.stat(empty);
+ }
+
+ @Test
+ public void testResolvePathThatHasNoEntry() throws Throwable {
+ String empty = "/empty2";
+ operations.mknode(empty, false);
+ try {
+ ServiceRecord record = operations.resolve(empty);
+ fail("expected an exception, got " + record);
+ } catch (NoRecordException expected) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testOverwrite() throws Throwable {
+ ServiceRecord written = putExampleServiceEntry(ENTRY_PATH, 0);
+ ServiceRecord resolved1 = operations.resolve(ENTRY_PATH);
+ resolved1.description = "resolved1";
+ try {
+ operations.bind(ENTRY_PATH, resolved1, 0);
+ fail("overwrite succeeded when it should have failed");
+ } catch (FileAlreadyExistsException expected) {
+ // expected
+ }
+
+ // verify the stored record is unchanged
+ ServiceRecord resolved2 = operations.resolve(ENTRY_PATH);
+ assertMatches(written, resolved2);
+ operations.bind(ENTRY_PATH, resolved1, BindFlags.OVERWRITE);
+ ServiceRecord resolved3 = operations.resolve(ENTRY_PATH);
+ assertMatches(resolved1, resolved3);
+ }
+
+ @Test
+ public void testPutGetContainerPersistenceServiceEntry() throws Throwable {
+
+ String path = ENTRY_PATH;
+ ServiceRecord written = buildExampleServiceEntry(
+ PersistencePolicies.CONTAINER);
+
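+ // create the parent path first, then bind without the OVERWRITE flag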
+ operations.mknode(RegistryPathUtils.parentOf(path), true);
+ operations.bind(path, written, BindFlags.CREATE);
+ ServiceRecord resolved = operations.resolve(path);
+ validateEntry(resolved);
+ assertMatches(written, resolved);
+ }
+
+ @Test
+ public void testAddingWriteAccessIsNoOpEntry() throws Throwable {
+
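+ // on an insecure registry, adding a write accessor is a no-op returning false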
+ assertFalse(operations.addWriteAccessor("id", "pass"));
+ operations.clearWriteAccessors();
+ }
+
+ @Test
+ public void testListListFully() throws Throwable {
+ ServiceRecord r1 = new ServiceRecord();
+ ServiceRecord r2 = createRecord("i",
+ PersistencePolicies.PERMANENT, "r2");
+
+ String path = USERPATH + SC_HADOOP + "/listing";
+ operations.mknode(path, true);
+ String r1path = path + "/r1";
+ operations.bind(r1path, r1, 0);
+ String r2path = path + "/r2";
+ operations.bind(r2path, r2, 0);
+
+ RegistryPathStatus r1stat = operations.stat(r1path);
+ assertEquals("r1", r1stat.path);
+ RegistryPathStatus r2stat = operations.stat(r2path);
+ assertEquals("r2", r2stat.path);
+ assertNotEquals(r1stat, r2stat);
+
+ // listings now
+ List<String> list = operations.list(path);
+ assertEquals("Wrong no. of children", 2, list.size());
+ // there's no order here, so create one
+ Map<String, String> names = new HashMap<String, String>();
+ String entries = "";
+ for (String child : list) {
+ names.put(child, child);
+ entries += child + " ";
+ }
+ assertTrue("No 'r1' in " + entries,
+ names.containsKey("r1"));
+ assertTrue("No 'r2' in " + entries,
+ names.containsKey("r2"));
+
+ Map<String, RegistryPathStatus> stats =
+ RegistryUtils.statChildren(operations, path);
+ assertEquals("Wrong no. of children", 2, stats.size());
+ assertEquals(r1stat, stats.get("r1"));
+ assertEquals(r2stat, stats.get("r2"));
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/AbstractSecureRegistryTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/AbstractSecureRegistryTest.java
new file mode 100644
index 0000000000..ca3f9c9031
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/AbstractSecureRegistryTest.java
@@ -0,0 +1,356 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.secure;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.util.KerberosName;
+import org.apache.hadoop.service.Service;
+import org.apache.hadoop.service.ServiceOperations;
+import org.apache.hadoop.registry.RegistryTestHelper;
+import org.apache.hadoop.registry.client.impl.zk.RegistrySecurity;
+import org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions;
+import org.apache.hadoop.registry.server.services.AddingCompositeService;
+import org.apache.hadoop.registry.server.services.MicroZookeeperService;
+import org.apache.hadoop.registry.server.services.MicroZookeeperServiceKeys;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.rules.TestName;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.security.auth.Subject;
+import javax.security.auth.kerberos.KerberosPrincipal;
+import javax.security.auth.login.LoginContext;
+import javax.security.auth.login.LoginException;
+import java.io.File;
+import java.io.IOException;
+import java.security.Principal;
+import java.util.HashSet;
+import java.util.Properties;
+import java.util.Set;
+
+/**
+ * Base class for the Kerberos-secured registry tests. This is based on the
+ * (JUnit3) KerberosSecurityTestcase and its test case, TestMiniKdc.
+ */
+public class AbstractSecureRegistryTest extends RegistryTestHelper {
+ public static final String REALM = "EXAMPLE.COM";
+ public static final String ZOOKEEPER = "zookeeper";
+ public static final String ZOOKEEPER_LOCALHOST = "zookeeper/localhost";
+ public static final String ZOOKEEPER_REALM = "zookeeper@" + REALM;
+ public static final String ZOOKEEPER_CLIENT_CONTEXT = ZOOKEEPER;
+ public static final String ZOOKEEPER_SERVER_CONTEXT = "ZOOKEEPER_SERVER";
+ public static final String ZOOKEEPER_LOCALHOST_REALM =
+ ZOOKEEPER_LOCALHOST + "@" + REALM;
+ public static final String ALICE = "alice";
+ public static final String ALICE_CLIENT_CONTEXT = "alice";
+ public static final String ALICE_LOCALHOST = "alice/localhost";
+ public static final String BOB = "bob";
+ public static final String BOB_CLIENT_CONTEXT = "bob";
+ public static final String BOB_LOCALHOST = "bob/localhost";
+
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(AbstractSecureRegistryTest.class);
+
+ public static final Configuration CONF;
+
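+ // shared configuration: Kerberos authentication with authorization enabled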
+ static {
+ CONF = new Configuration();
+ CONF.set("hadoop.security.authentication", "kerberos");
+ CONF.setBoolean("hadoop.security.authorization", true);
+ }
+
+ private static final AddingCompositeService classTeardown =
+ new AddingCompositeService("classTeardown");
+
+ // static initializer guarantees it is always started
+ // ahead of any @BeforeClass methods
+ static {
+ classTeardown.init(CONF);
+ classTeardown.start();
+ }
+
+ public static final String SUN_SECURITY_KRB5_DEBUG =
+ "sun.security.krb5.debug";
+
+ private final AddingCompositeService teardown =
+ new AddingCompositeService("teardown");
+
+ protected static MiniKdc kdc;
+ protected static File keytab_zk;
+ protected static File keytab_bob;
+ protected static File keytab_alice;
+ protected static File kdcWorkDir;
+ protected static Properties kdcConf;
+ protected static RegistrySecurity registrySecurity;
+
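+ // hard limit: each test case must finish within 15 minutes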
+ @Rule
+ public final Timeout testTimeout = new Timeout(900000);
+
+ @Rule
+ public TestName methodName = new TestName();
+ protected MicroZookeeperService secureZK;
+ protected static File jaasFile;
+ private LoginContext zookeeperLogin;
+
+ /**
+ * All class initialization for this test class
+ * @throws Exception
+ */
+ @BeforeClass
+ public static void beforeSecureRegistryTestClass() throws Exception {
+ registrySecurity = new RegistrySecurity("registrySecurity");
+ registrySecurity.init(CONF);
+ setupKDCAndPrincipals();
+ RegistrySecurity.clearJaasSystemProperties();
+ RegistrySecurity.bindJVMtoJAASFile(jaasFile);
+ initHadoopSecurity();
+ }
+
+ @AfterClass
+ public static void afterSecureRegistryTestClass() throws
+ Exception {
+ describe(LOG, "teardown of class");
+ classTeardown.close();
+ teardownKDC();
+ }
+
+ /**
+ * give our thread a name
+ */
+ @Before
+ public void nameThread() {
+ Thread.currentThread().setName("JUnit");
+ }
+
+ /**
+ * For unknown reasons, the before-class setting of the JVM properties was
+ * not always being picked up. This method is the per-test hook for
+ * setting them before every test case
+ */
+ @Before
+ public void beforeSecureRegistryTest() {
+
+ }
+
+ @After
+ public void afterSecureRegistryTest() throws IOException {
+ describe(LOG, "teardown of instance");
+ teardown.close();
+ stopSecureZK();
+ }
+
+ protected static void addToClassTeardown(Service svc) {
+ classTeardown.addService(svc);
+ }
+
+ protected void addToTeardown(Service svc) {
+ teardown.addService(svc);
+ }
+
+
+ public static void teardownKDC() throws Exception {
+ if (kdc != null) {
+ kdc.stop();
+ kdc = null;
+ }
+ }
+
+ /**
+ * Sets up the KDC and a set of principals in the JAAS file
+ *
+ * @throws Exception
+ */
+ public static void setupKDCAndPrincipals() throws Exception {
+ // set up the KDC
+ File target = new File(System.getProperty("test.dir", "target"));
+ kdcWorkDir = new File(target, "kdc");
+ if (!kdcWorkDir.mkdirs()) {
+ // mkdirs() returns false when the directory already exists
+ assertTrue(kdcWorkDir.isDirectory());
+ }
+ kdcConf = MiniKdc.createConf();
+ kdcConf.setProperty(MiniKdc.DEBUG, "true");
+ kdc = new MiniKdc(kdcConf, kdcWorkDir);
+ kdc.start();
+
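+ // one keytab per account; each holds both $principal and $principal/localhost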
+ keytab_zk = createKeytab(ZOOKEEPER, "zookeeper.keytab");
+ keytab_alice = createKeytab(ALICE, "alice.keytab");
+ keytab_bob = createKeytab(BOB, "bob.keytab");
+
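+ // assemble a JAAS file: client and server contexts for zookeeper,
+ // plus client contexts for alice and bob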
+ StringBuilder jaas = new StringBuilder(1024);
+ jaas.append(registrySecurity.createJAASEntry(ZOOKEEPER_CLIENT_CONTEXT,
+ ZOOKEEPER, keytab_zk));
+ jaas.append(registrySecurity.createJAASEntry(ZOOKEEPER_SERVER_CONTEXT,
+ ZOOKEEPER_LOCALHOST, keytab_zk));
+ jaas.append(registrySecurity.createJAASEntry(ALICE_CLIENT_CONTEXT,
+ ALICE_LOCALHOST, keytab_alice));
+ jaas.append(registrySecurity.createJAASEntry(BOB_CLIENT_CONTEXT,
+ BOB_LOCALHOST, keytab_bob));
+
+ jaasFile = new File(kdcWorkDir, "jaas.txt");
+ FileUtils.write(jaasFile, jaas.toString());
+ LOG.info("\n"+ jaas);
+ RegistrySecurity.bindJVMtoJAASFile(jaasFile);
+ }
+
+
+ // auth_to_local rule: strip the @EXAMPLE.COM realm to get the short name
+ protected static final String kerberosRule =
+ "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nDEFAULT";
+
+ /**
+ * Init hadoop security by setting up the UGI config
+ */
+ public static void initHadoopSecurity() {
+
+ UserGroupInformation.setConfiguration(CONF);
+
+ KerberosName.setRules(kerberosRule);
+ }
+
+ /**
+ * Stop the secure ZK and log out the ZK account
+ */
+ public synchronized void stopSecureZK() {
+ ServiceOperations.stop(secureZK);
+ secureZK = null;
+ logout(zookeeperLogin);
+ zookeeperLogin = null;
+ }
+
+
+ public static MiniKdc getKdc() {
+ return kdc;
+ }
+
+ public static File getKdcWorkDir() {
+ return kdcWorkDir;
+ }
+
+ public static Properties getKdcConf() {
+ return kdcConf;
+ }
+
+ /**
+ * Create a secure instance
+ * @param name instance name
+ * @return the instance
+ * @throws Exception
+ */
+ protected static MicroZookeeperService createSecureZKInstance(String name)
+ throws Exception {
+ String context = ZOOKEEPER_SERVER_CONTEXT;
+ Configuration conf = new Configuration();
+
+ File testdir = new File(System.getProperty("test.dir", "target"));
+ File workDir = new File(testdir, name);
+ if (!workDir.mkdirs()) {
+ assertTrue(workDir.isDirectory());
+ }
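+ // tell the ZK server not to keep connections open after a SASL auth failure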
+ System.setProperty(
+ ZookeeperConfigOptions.PROP_ZK_SERVER_MAINTAIN_CONNECTION_DESPITE_SASL_FAILURE,
+ "false");
+ RegistrySecurity.validateContext(context);
+ conf.set(MicroZookeeperServiceKeys.KEY_REGISTRY_ZKSERVICE_JAAS_CONTEXT,
+ context);
+ MicroZookeeperService secureZK = new MicroZookeeperService(name);
+ secureZK.init(conf);
+ LOG.info(secureZK.getDiagnostics());
+ return secureZK;
+ }
+
+ /**
+ * Create the keytab for the given principal, including both the
+ * raw principal and $principal/localhost
+ * @param principal principal short name
+ * @param filename filename of keytab
+ * @return file of keytab
+ * @throws Exception
+ */
+ public static File createKeytab(String principal,
+ String filename) throws Exception {
+ assertNotEmpty("empty principal", principal);
+ assertNotEmpty("empty host", filename);
+ assertNotNull("Null KDC", kdc);
+ File keytab = new File(kdcWorkDir, filename);
+ kdc.createPrincipal(keytab, principal, principal + "/localhost");
+ return keytab;
+ }
+
+ public static String getPrincipalAndRealm(String principal) {
+ return principal + "@" + getRealm();
+ }
+
+ protected static String getRealm() {
+ return kdc.getRealm();
+ }
+
+
+ /**
+ * Log in, defaulting to the client context
+ * @param principal principal
+ * @param context context
+ * @param keytab keytab
+ * @return the logged in context
+ * @throws LoginException failure to log in
+ */
+ protected LoginContext login(String principal,
+ String context, File keytab) throws LoginException {
+ LOG.info("Logging in as {} in context {} with keytab {}",
+ principal, context, keytab);
+ Set<Principal> principals = new HashSet<Principal>();
+ principals.add(new KerberosPrincipal(principal));
+ Subject subject = new Subject(false, principals, new HashSet